import os

import gradio as gr
import torch
import transformers

# Retrieve Hugging Face API token from environment variable
hf_token = os.getenv("HF_TOKEN")

# Ensure the token is available
if not hf_token:
    raise ValueError("Hugging Face token not found. Please add it to the secrets in Hugging Face Spaces.")

# Load the chatbot model, authenticating with the token (Llama 3 is a gated model)
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
pipe = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
    token=hf_token,  # `token` replaces the deprecated `use_auth_token` argument
)

# Function to calculate scores and rankings
def calculate_ranking(data):
    for institution in data:
        institution["Total"] = (
            institution["TLR"] + institution["GO"] + institution["OI"] + institution["PR"]
        )
    ranked_data = sorted(data, key=lambda x: x["Total"], reverse=True)
    for rank, institution in enumerate(ranked_data, start=1):
        institution["Rank"] = rank
    return ranked_data
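
# Illustrative check of the ranking logic (hypothetical input, not part of the app's
# data): calculate_ranking([{"Institution": "X", "TLR": 60, "GO": 70, "OI": 80,
# "PR": 90}]) adds "Total": 300 (= 60 + 70 + 80 + 90) and "Rank": 1 to that entry.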

# Predefined ranking data
example_data = [
    {"Institution": "A", "TLR": 70, "GO": 85, "OI": 90, "PR": 75},
    {"Institution": "B", "TLR": 80, "GO": 88, "OI": 85, "PR": 90},
    {"Institution": "C", "TLR": 65, "GO": 80, "OI": 70, "PR": 60},
]
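
# With these scores the totals work out to B = 343, A = 320, and C = 275,
# so a "rank" query should list B first, then A, then C.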

# Chatbot function
def chatbot_response(user_message):
    # Check for predefined data queries
    if "rank" in user_message.lower():
        ranked_data = calculate_ranking(example_data)
        response = "Here are the ranks of the institutions:\n"
        for institution in ranked_data:
            response += f"Rank {institution['Rank']}: {institution['Institution']} (Total Score: {institution['Total']})\n"
        return response
    
    # Fallback to model-generated response for out-of-scope questions
    outputs = pipe(
        user_message,
        max_new_tokens=100,  # Restrict length for unexpected questions
        do_sample=True,
        temperature=0.7,  # Slightly random responses for more natural output
        top_p=0.9,
        return_full_text=False,  # Return only the completion, not the echoed prompt
    )
    return outputs[0]["generated_text"]

# Gradio interface
def build_gradio_ui():
    with gr.Blocks() as demo:
        gr.Markdown("## Chatbot with Predefined Data and AI Responses")
        gr.Markdown("Ask about institution rankings or any other general query!")
        with gr.Row():
            user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            chatbot_output = gr.Textbox(label="Chatbot Response", interactive=False)
        submit_button = gr.Button("Send")
        submit_button.click(chatbot_response, inputs=[user_input], outputs=[chatbot_output])
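        # Optionally, pressing Enter in the textbox can trigger the same handler:
        # user_input.submit(chatbot_response, inputs=[user_input], outputs=[chatbot_output])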

    return demo

# Build the Gradio app; it is launched with a public link when run directly
demo = build_gradio_ui()

if __name__ == "__main__":
    demo.launch(share=True)  # Enable public link