"""Simple Chatbot

@author: Nigel Gebodh
@email: [email protected]
"""

import os

import numpy as np
import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (e.g., HUGGINGFACEHUB_API_TOKEN) from a .env file.
load_dotenv()
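
# The Hugging Face Inference API exposes an OpenAI-compatible endpoint,
# so the standard OpenAI client can be pointed at it via base_url.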
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),
)


model_link = "meta-llama/Meta-Llama-3-8B-Instruct"
model_info = {
    'description': """The Llama 3 model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n
    It was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.**\n""",
    'logo': 'Llama_logo.png',
}
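
# Image filenames hosted on https://random.dog/, used as a lighthearted
# fallback when the model call fails.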
random_dog = [
    "0f476473-2d8b-415e-b944-483768418a95.jpg",
    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
]


def reset_conversation():
    """Clears the chat history stored in session state."""
    st.session_state.conversation = []
    st.session_state.messages = []


# Sidebar: sampling temperature control and a button to clear the chat.
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)

st.sidebar.write("You're now chatting with **Llama 3**")
st.sidebar.markdown(model_info['description'])
st.sidebar.image(model_info['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nRun into issues? \nTry again later as GPU access might be limited.")


# Initialize the chat history on first load.
if "messages" not in st.session_state:
    st.session_state.messages = []
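
# Replay the stored conversation so it persists across Streamlit reruns.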
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input("Hi, I'm Llama 3, ask me a question"):

    # Display the user's message and add it to the history.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})
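
    # Stream the assistant's reply, falling back to a friendly error
    # message (and a random dog picture) if the request fails.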
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=model_link,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )

            # st.write_stream renders tokens as they arrive and returns
            # the concatenated text once the stream finishes.
            response = st.write_stream(stream)

        except Exception as e:
            response = "😵‍💫 Looks like something went wrong! Try again later.\nHere's a random pic of a 🐶:"
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)

    st.session_state.messages.append({"role": "assistant", "content": response})