File size: 2,416 Bytes
82172b4
40ddc44
d9e8de7
 
 
 
 
 
 
 
82172b4
 
d9e8de7
 
82172b4
 
40ddc44
 
d9e8de7
40ddc44
b281330
40ddc44
 
20ce52f
40ddc44
d9e8de7
40ddc44
ceb05b5
40ddc44
 
b281330
40ddc44
d9e8de7
024bb9a
 
40ddc44
 
b281330
 
 
40ddc44
b281330
40ddc44
d9e8de7
40ddc44
b281330
d9e8de7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f139a3
d9e8de7
 
 
82172b4
d9e8de7
b281330
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
from openai import OpenAI
import streamlit as st
import os
from datetime import datetime

# Load the API key from the environment ONLY. Never keep a hard-coded
# credential as a fallback: it leaks the secret in source control and makes
# the missing-key guard below unreachable (the fallback is always truthy).
# NOTE(security): the key previously embedded here must be treated as
# compromised and rotated.
API_KEY = os.getenv("NV_API_KEY")
if not API_KEY:
    st.error("API key is missing! Please set NV_API_KEY as an environment variable.")
    st.stop()

# OpenAI-compatible client pointed at NVIDIA's API gateway.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=API_KEY,
)

st.title("Nemotron 4 340B")

# Sidebar: app blurb, a button that wipes all session state, and a
# copyright line with a live current year.
with st.sidebar:
    st.markdown("This is a basic chatbot. Ask anything. The app is supported by Nazmul Hasan Nihal.")
    clear_pressed = st.button("Clear Session")
    if clear_pressed:
        st.session_state.clear()
    st.write(f"Copyright 2023-{datetime.now().year} Present Nazmul Hasan Nihal")

# Seed session state on the first run: which model to call, and the
# conversation history (starting from a single system message).
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "nvidia/nemotron-4-340b-instruct"

if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]

# Replay the stored conversation, one chat bubble per message.
for msg in st.session_state.messages:
    role, content = msg["role"], msg["content"]
    with st.chat_message(role):
        st.markdown(content)

# On new user input: record and echo the prompt, then stream the model's
# reply, render it, and persist it into the history.
if prompt := st.chat_input("What is up"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Assistant turn: call the API with the full history and stream tokens.
    with st.chat_message("assistant"):
        with st.spinner("The assistant is thinking... Please wait."):
            try:
                stream = client.chat.completions.create(
                    model=st.session_state["openai_model"],
                    messages=st.session_state.messages,
                    temperature=0.5,
                    top_p=0.7,
                    max_tokens=1024,
                    stream=True,
                )
                # Gather the streamed deltas, dropping empty/None chunks,
                # then join them into the full reply.
                response = "".join(
                    chunk.choices[0].delta.content
                    for chunk in stream
                    if chunk.choices[0].delta.content
                )
                st.markdown(response)

                # Persist the assistant's reply for future turns.
                st.session_state.messages.append(
                    {"role": "assistant", "content": response}
                )

            except Exception as e:
                st.error(f"An error occurred: {e}")