File size: 2,384 Bytes
aec35e1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import os

import openai
import streamlit as st
from transformers import pipeline

from helpers.foundation_models import *

# OpenAI client, authenticated from the OPENAI_API_KEY environment variable.
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])


st.title("🌟 Streamlit + Hugging Face Demo πŸ€–")


# Conversation history lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []


# Replay every stored turn so the chat transcript persists across reruns.
for past_turn in st.session_state.messages:
    with st.chat_message(past_turn["role"]):
        st.markdown(past_turn["content"])


# Collapsible instructions panel in the sidebar.
# Fix: the original opened a main-page `st.expander` but then wrote the text
# with `st.sidebar.markdown`, which routed the markdown to the sidebar and
# left the expander empty. Using the sidebar expander's context manager puts
# the instructions inside the expander itself.
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
        # 🌟 Streamlit + Hugging Face Demo πŸ€–

        ## Introduction πŸ“–

        This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit. 
        """
    )


# Task selector: determines which model/pipeline handles the next prompt.
option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT"),
)


# Sidebar control that wipes the stored conversation.
clear_button = st.sidebar.button("Clear Conversation", key="clear")


if clear_button:
    # Re-binding the history to an empty list resets the chat on this rerun.
    st.session_state.messages = []


# React to user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    if option == "Sentiment Analysis":
        pipe_sentiment_analysis = pipeline("sentiment-analysis")
        if prompt:
            out = pipe_sentiment_analysis(prompt)
            doc = f"""
                Prompt: {prompt}
                Sentiment: {out[0]["label"]}
                Score: {out[0]["score"]}
            """
    elif option == "Medical Summarization":
        pipe_summarization = pipeline(
            "summarization", model="Falconsai/medical_summarization"
        )
        if prompt:
            out = pipe_summarization(prompt)
            doc = out[0]["summary_text"]
    elif option == "ChatGPT":
        if prompt:
            out = call_chatgpt(query=prompt)
            doc = out
    else:
        None

    response = f"{doc}"
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})