File size: 2,868 Bytes
8c4195c
 
 
7eaa6f3
8c4195c
7eaa6f3
 
 
 
 
 
 
 
8c4195c
 
 
 
 
 
7eaa6f3
8c4195c
 
6225e5c
d3a54c8
 
 
 
 
 
6225e5c
 
 
 
 
 
 
 
 
 
 
 
 
d3a54c8
8c4195c
6225e5c
 
d3a54c8
6225e5c
d3a54c8
6225e5c
 
d3a54c8
6225e5c
d3a54c8
 
 
8c4195c
 
 
 
6225e5c
 
8c4195c
 
 
 
 
 
 
 
8462c29
8c4195c
 
 
8462c29
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import transformers
import torch
import gradio as gr
import os

# Retrieve Hugging Face API token from environment variable
hf_token = os.getenv("HF_TOKEN")

# Fail fast with an actionable message: the Llama-3 repo is gated, so the
# pipeline cannot download weights without credentials.
if not hf_token:
    raise ValueError("Hugging Face token not found. Please add it to the secrets in Hugging Face Spaces.")

# Load the chatbot model (gated repo, so authentication is required)
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},  # bf16 halves memory vs fp32
    device_map="auto",  # place layers on GPU(s) when available, else CPU
    token=hf_token,  # fix: `use_auth_token` is deprecated; `token` is the current kwarg
)

# Predefined data
example_data = [
    {"Institution": "A", "TLR": 70, "GO": 85, "OI": 90, "PR": 75},
    {"Institution": "B", "TLR": 80, "GO": 88, "OI": 85, "PR": 90},
    {"Institution": "C", "TLR": 65, "GO": 80, "OI": 70, "PR": 60},
]

# The four score components that make up an institution's total.
_SCORE_KEYS = ("TLR", "GO", "OI", "PR")


def _total_score(record):
    """Return the sum of the four component scores for one institution."""
    return sum(record[key] for key in _SCORE_KEYS)


# Render the institutions as a ranking list, highest total score first.
predefined_context = "Here are the institution rankings based on scores:\n" + "".join(
    f"- {record['Institution']} (Total Score: {_total_score(record)})\n"
    for record in sorted(example_data, key=_total_score, reverse=True)
)

# System prompt to provide context to the model
system_prompt = f"""You are an intelligent assistant. Here is some contextual information:

{predefined_context}



When a user asks about rankings, respond with this information. If the user asks general questions, respond appropriately.

"""

# Chatbot function
def chatbot_response(user_message):
    """Generate a reply to *user_message* with the Llama-3 pipeline.

    The predefined ranking context (``system_prompt``) is prepended so the
    model can answer ranking questions.

    Returns only the newly generated text: without ``return_full_text=False``
    the text-generation pipeline echoes the entire prompt (system context,
    "User:" line and all) at the start of ``generated_text``, which was the
    original bug — users saw the whole prompt before the answer.
    """
    # Combine system prompt with the user's message
    full_prompt = f"{system_prompt}\nUser: {user_message}\nAssistant:"

    # Generate a response using the model
    outputs = pipeline(
        full_prompt,
        max_new_tokens=150,  # Adjust token limit as needed
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        return_full_text=False,  # bug fix: drop the echoed prompt from the output
    )
    return outputs[0]["generated_text"].strip()

# Gradio interface
def build_gradio_ui():
    """Assemble and return the Gradio Blocks app for the chatbot."""
    with gr.Blocks() as demo:
        gr.Markdown("## Intelligent Chatbot with Predefined Context and AI Responses")
        gr.Markdown("Ask about institution rankings or any general query!")
        with gr.Row():
            message_box = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            reply_box = gr.Textbox(label="Chatbot Response", interactive=False)
        send_button = gr.Button("Send")
        # Wire the button: user text in, model reply out.
        send_button.click(chatbot_response, inputs=[message_box], outputs=[reply_box])

    return demo

# Launch the Gradio app with a public link
# Built at import time so hosting platforms (e.g. HF Spaces) can find `demo`
# as a module attribute even when this file is imported rather than run.
demo = build_gradio_ui()

if __name__ == "__main__":
    demo.launch(share=True)  # Enable public link