NazmulHasanNihal committed on
Commit d9e8de7
1 Parent(s): d5aef64

Update app.py

Files changed (1)
  1. app.py +38 -24
app.py CHANGED
@@ -1,54 +1,68 @@
 from openai import OpenAI
 import streamlit as st
-import os
-from datetime import datetime
+import os
+from datetime import datetime
+
+# Load API key securely
+API_KEY = os.getenv("NV_API_KEY", "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")
+if not API_KEY:
+    st.error("API key is missing! Please set NV_API_KEY as an environment variable.")
+    st.stop()
 
 client = OpenAI(
-    base_url = "https://integrate.api.nvidia.com/v1",
-    api_key = "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"
+    base_url="https://integrate.api.nvidia.com/v1",
+    api_key=API_KEY
 )
 
-
-
 st.title("Nemotron 4 340B")
 
+# Sidebar content
 with st.sidebar:
-    st.markdown("This is a basic chatbot. Ask anything. The app is supported by Nazmul Hasan Nihal")
+    st.markdown("This is a basic chatbot. Ask anything. The app is supported by Nazmul Hasan Nihal.")
     if st.button("Clear Session"):
         st.session_state.clear()
     st.write(f"Copyright 2023-{datetime.now().year} Present Nazmul Hasan Nihal")
 
+# Initialize session state
 if "openai_model" not in st.session_state:
     st.session_state['openai_model'] = "invidia/nemotron_4"
 
 if "messages" not in st.session_state:
-    st.session_state.messages = [{"role": "system", "content":"you are a helpful assistant"}]
+    st.session_state.messages = [{"role": "system", "content": "You are a helpful assistant."}]
 
+# Display previous messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
+# Handle user input
 if prompt := st.chat_input("What is up"):
     st.session_state.messages.append({"role": "user", "content": prompt})
     with st.chat_message("user"):
         st.markdown(prompt)
 
+    # Assistant response
     with st.chat_message("assistant"):
         with st.spinner("The assistant is thinking... Please wait."):
-            stream = client.chat.completions.create(
-                model=st.session_state["openai_model"],
-                messages = st.session_state.messages,
-                temperature = 0.5,
-                top_p = 0.7,
-                max_tokens = 1024,
-                stream = True,
-            )
-            response_chunks = []
-            for chunk in stream:
-                if chunk.choices[0].delta.content is not None:
-                    response_chunks.append(chunk.choices[0].delta.content)
-            respose = "".join(response_chunks)
-            st.markdown(response)
-
-            st.session_state.messages.append({"role":"assistant", "content": response})
+            try:
+                # Generate response
+                stream = client.chat.completions.create(
+                    model=st.session_state["openai_model"],
+                    messages=st.session_state.messages,
+                    temperature=0.5,
+                    top_p=0.7,
+                    max_tokens=1024,
+                    stream=True,
+                )
+                response_chunks = []
+                for chunk in stream:
+                    if chunk.choices[0].delta.content:
+                        response_chunks.append(chunk.choices[0].delta.content)
+                response = "".join(response_chunks)
+                st.markdown(response)
+
+                # Save assistant message
+                st.session_state.messages.append({"role": "assistant", "content": response})
 
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
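The new code still ships a literal fallback key, so "Load API key securely" only partially holds: anyone reading the repository can use that key, and any key that has appeared in a commit is effectively public and is normally rotated. A minimal sketch of a stricter setup, assuming the key is supplied only through the NV_API_KEY environment variable or Streamlit's .streamlit/secrets.toml (neither shown in this commit):

import os
import streamlit as st
from openai import OpenAI

# Read the key from the environment first; optionally fall back to Streamlit's
# secrets file. No key is embedded in the source.
API_KEY = os.getenv("NV_API_KEY", "")
if not API_KEY:
    try:
        API_KEY = st.secrets["NV_API_KEY"]
    except Exception:
        API_KEY = ""

if not API_KEY:
    st.error("API key is missing! Set NV_API_KEY in the environment or in .streamlit/secrets.toml.")
    st.stop()

client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=API_KEY,
)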
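The response loop in this commit buffers every chunk and only shows the reply once the stream has finished. Below is a sketch of an incremental alternative for the assistant block, assuming Streamlit >= 1.31 (which provides st.write_stream) and the same client and session state as in app.py; note that the stored model id "invidia/nemotron_4" does not match NVIDIA's published identifier for this model (nvidia/nemotron-4-340b-instruct), so requests may fail until that value is corrected:

with st.chat_message("assistant"):
    stream = client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=st.session_state.messages,
        temperature=0.5,
        top_p=0.7,
        max_tokens=1024,
        stream=True,
    )

    def token_chunks():
        # Yield only the text deltas; skip keep-alive chunks with no content.
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                yield delta

    # st.write_stream renders the text as it arrives and returns the full reply.
    response = st.write_stream(token_chunks())

st.session_state.messages.append({"role": "assistant", "content": response})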