Spaces:
Sleeping
Sleeping
NazmulHasanNihal
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -2,12 +2,6 @@ from openai import OpenAI
|
|
2 |
import streamlit as st
|
3 |
import os
|
4 |
from datetime import datetime
|
5 |
-
import pandas as pd
|
6 |
-
import json
|
7 |
-
import xml.etree.ElementTree as ET
|
8 |
-
from io import StringIO, BytesIO
|
9 |
-
from time import sleep
|
10 |
-
from tabulate import tabulate
|
11 |
|
12 |
# Load API key securely
|
13 |
API_KEY = os.getenv("NV_API_KEY", "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")
|
@@ -20,12 +14,11 @@ client = OpenAI(
|
|
20 |
api_key=API_KEY
|
21 |
)
|
22 |
|
23 |
-
st.set_page_config(page_title="Nemotron 4 340B", layout="wide")
|
24 |
st.title("Nemotron 4 340B")
|
25 |
|
26 |
# Sidebar content
|
27 |
with st.sidebar:
|
28 |
-
st.markdown("This is
|
29 |
if st.button("Clear Session"):
|
30 |
st.session_state.clear()
|
31 |
st.write(f"Copyright 2023-{datetime.now().year} Present Nazmul Hasan Nihal")
|
@@ -35,49 +28,22 @@ if "openai_model" not in st.session_state:
|
|
35 |
st.session_state['openai_model'] = "nvidia/nemotron-4-340b-instruct"
|
36 |
|
37 |
if "messages" not in st.session_state:
|
38 |
-
st.session_state.messages = [{"role": "system", "content": "You are a helpful
|
39 |
-
|
40 |
-
# Function to display typing animation
|
41 |
-
def display_typing_animation(text):
|
42 |
-
for char in text:
|
43 |
-
st.markdown(char, unsafe_allow_html=True)
|
44 |
-
sleep(0.02)
|
45 |
-
|
46 |
-
# Function to process uploaded files
|
47 |
-
def process_uploaded_file(file):
|
48 |
-
file_type = file.type
|
49 |
-
if file_type == "text/csv":
|
50 |
-
df = pd.read_csv(file)
|
51 |
-
return f"**CSV File Analysis:**\n{tabulate(df.head(), headers='keys', tablefmt='grid')}"
|
52 |
-
elif file_type in ["application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/vnd.ms-excel"]:
|
53 |
-
df = pd.read_excel(file)
|
54 |
-
return f"**Excel File Analysis:**\n{tabulate(df.head(), headers='keys', tablefmt='grid')}"
|
55 |
-
elif file_type == "application/json":
|
56 |
-
data = json.load(file)
|
57 |
-
return f"**JSON File Analysis:**\n```json\n{json.dumps(data, indent=4)}\n```"
|
58 |
-
elif file_type in ["text/xml", "application/xml"]:
|
59 |
-
tree = ET.parse(file)
|
60 |
-
root = tree.getroot()
|
61 |
-
return f"**XML File Root Tag:** {root.tag}\n**Attributes:** {root.attrib}"
|
62 |
-
elif file_type == "application/pdf":
|
63 |
-
return "PDF uploads are not processed for content. Try text-based analysis."
|
64 |
-
else:
|
65 |
-
return "Unsupported file format. Please upload CSV, Excel, JSON, XML, or PDF files."
|
66 |
|
67 |
# Display previous messages
|
68 |
for message in st.session_state.messages:
|
69 |
with st.chat_message(message["role"]):
|
70 |
st.markdown(message["content"])
|
71 |
|
72 |
-
# Handle user input
|
73 |
-
if
|
74 |
-
st.session_state.messages.append({"role": "user", "content":
|
75 |
with st.chat_message("user"):
|
76 |
-
st.markdown(
|
77 |
|
78 |
# Assistant response
|
79 |
with st.chat_message("assistant"):
|
80 |
-
with st.spinner("The assistant is
|
81 |
try:
|
82 |
# Generate response
|
83 |
stream = client.chat.completions.create(
|
@@ -92,7 +58,6 @@ if user_input := st.chat_input("Type a message or upload a file below."):
|
|
92 |
for chunk in stream:
|
93 |
if chunk.choices[0].delta.content:
|
94 |
response_chunks.append(chunk.choices[0].delta.content)
|
95 |
-
display_typing_animation(chunk.choices[0].delta.content)
|
96 |
response = "".join(response_chunks)
|
97 |
st.markdown(response)
|
98 |
|
@@ -100,15 +65,4 @@ if user_input := st.chat_input("Type a message or upload a file below."):
|
|
100 |
st.session_state.messages.append({"role": "assistant", "content": response})
|
101 |
|
102 |
except Exception as e:
|
103 |
-
st.error(f"An error occurred: {e}")
|
104 |
-
|
105 |
-
# Add file upload to chat input
|
106 |
-
st.markdown("### File Upload:")
|
107 |
-
uploaded_file = st.file_uploader("Upload your file (CSV, XLSX, JSON, XML, PDF)", type=["csv", "xlsx", "json", "xml", "pdf"])
|
108 |
-
if uploaded_file:
|
109 |
-
with st.chat_message("user"):
|
110 |
-
st.markdown(f"**Uploaded File:** {uploaded_file.name}")
|
111 |
-
with st.chat_message("assistant"):
|
112 |
-
with st.spinner("Processing the uploaded file..."):
|
113 |
-
file_analysis = process_uploaded_file(uploaded_file)
|
114 |
-
st.markdown(file_analysis)
|
|
|
2 |
import streamlit as st
|
3 |
import os
|
4 |
from datetime import datetime
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
# Load API key securely
|
7 |
API_KEY = os.getenv("NV_API_KEY", "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")
|
|
|
14 |
api_key=API_KEY
|
15 |
)
|
16 |
|
|
|
17 |
st.title("Nemotron 4 340B")
|
18 |
|
19 |
# Sidebar content
|
20 |
with st.sidebar:
|
21 |
+
st.markdown("This is a basic chatbot. Ask anything. The app is supported by Nazmul Hasan Nihal.")
|
22 |
if st.button("Clear Session"):
|
23 |
st.session_state.clear()
|
24 |
st.write(f"Copyright 2023-{datetime.now().year} Present Nazmul Hasan Nihal")
|
|
|
28 |
st.session_state['openai_model'] = "nvidia/nemotron-4-340b-instruct"
|
29 |
|
30 |
if "messages" not in st.session_state:
|
31 |
+
st.session_state.messages = [{"role": "system", "content": "You are a helpful assistant."}]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
|
33 |
# Display previous messages
|
34 |
for message in st.session_state.messages:
|
35 |
with st.chat_message(message["role"]):
|
36 |
st.markdown(message["content"])
|
37 |
|
38 |
+
# Handle user input
|
39 |
+
if prompt := st.chat_input("What is up"):
|
40 |
+
st.session_state.messages.append({"role": "user", "content": prompt})
|
41 |
with st.chat_message("user"):
|
42 |
+
st.markdown(prompt)
|
43 |
|
44 |
# Assistant response
|
45 |
with st.chat_message("assistant"):
|
46 |
+
with st.spinner("The assistant is thinking... Please wait."):
|
47 |
try:
|
48 |
# Generate response
|
49 |
stream = client.chat.completions.create(
|
|
|
58 |
for chunk in stream:
|
59 |
if chunk.choices[0].delta.content:
|
60 |
response_chunks.append(chunk.choices[0].delta.content)
|
|
|
61 |
response = "".join(response_chunks)
|
62 |
st.markdown(response)
|
63 |
|
|
|
65 |
st.session_state.messages.append({"role": "assistant", "content": response})
|
66 |
|
67 |
except Exception as e:
|
68 |
+
st.error(f"An error occurred: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|