""" Simple Chatbot
@author: Nigel Gebodh
@email: [email protected]
"""
import numpy as np
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the client against Hugging Face's OpenAI-compatible
# Inference API; the token is read from the HUGGINGFACEHUB_API_TOKEN
# environment variable (set it in your .env file)
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')
)
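
# Fail fast with a visible hint if the token is missing (a small sketch
# added here; the original app only surfaces errors via the chat fallback)
if not os.environ.get('HUGGINGFACEHUB_API_TOKEN'):
    st.sidebar.warning("HUGGINGFACEHUB_API_TOKEN is not set; API calls will fail.")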

# Define Llama 3 model
model_link = "meta-llama/Meta-Llama-3-8B-Instruct"
model_info = {
    'description': """Llama 3 is a **Large Language Model (LLM)** built for
                      question-and-answer interactions. It was created by
                      [**Meta AI**](https://llama.meta.com/) and has
                      **8 billion parameters**.""",
    'logo': 'Llama_logo.png'
}

# File names of fallback dog images hosted on https://random.dog/,
# shown when the API call fails
random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
              "1326984c-39b0-492c-a773-f120d747a7e2.jpg"]

def reset_conversation():
    """Clear the stored chat history so the conversation starts fresh."""
    st.session_state.conversation = []
    st.session_state.messages = []

# Temperature slider: lower values give more focused, deterministic replies;
# higher values give more varied ones
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# Reset button: the on_click callback clears the history before the rerun
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Sidebar: model description and caveats
st.sidebar.write("You're now chatting with **Llama 3**")
st.sidebar.markdown(model_info['description'])
st.sidebar.image(model_info['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("Run into issues? Try again later as GPU access might be limited.")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input; st.chat_input returns None until the user submits,
# so the walrus assignment runs the block only on a new message
if prompt := st.chat_input("Hi, I'm Llama 3, ask me a question"):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):

        try:
            # The API is stateless, so the full message history is replayed
            # on every call to preserve conversational context
            stream = client.chat.completions.create(
                model=model_link,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,  # cap on tokens generated per reply
            )

            # Render tokens as they arrive and collect the full reply text
            response = st.write_stream(stream)

        except Exception as e:
            # Fallback: show a friendly message plus a random dog picture
            response = "😵‍💫 Looks like something went wrong! Try again later.\nHere's a random pic of a 🐶:"
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)

    # Persist the assistant's reply (or the fallback text) in the history
    st.session_state.messages.append({"role": "assistant", "content": response})