Update app.py
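Replaces the hard-coded chain-of-thought prompt template and the per-message reasoning expander with a simpler streamed chat flow: adds a sidebar HuggingFace token field, fixes the model-name typo ("Linsting" to "Listing"), tightens the PDF/XLSX extraction in process_file, and generates responses on a background thread via TextIteratorStreamer and st.write_stream.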
app.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+from threading import Thread
 import PyPDF2
 import pandas as pd
 import torch
@@ -8,38 +9,19 @@ import torch
 st.set_page_config(
     page_title="WizNerd Insp",
     page_icon="🚀",
-    layout="
+    layout="centered"
 )
 
+# Corrected model name (fixed typo)
+MODEL_NAME = "amiguel/optimizedModelListing6.1"  # Changed from "Linsting" to "Listing"
+
 # Title with rocket emojis
 st.title("🚀 WizNerd Insp 🚀")
 
-#
-PROMPT_TEMPLATE = """Below is an instruction that describes a task, paired with an input that provides further context.
-You are an experienced inspection methods engineer with expertise in:
-- Offshore topside structural inspection planning
-- FLOC classification and RBI methodologies
-- Degradation mechanism analysis for process systems
-- ASME/API compliance and integrity engineering
-
-Write a response that appropriately completes the request following these steps:
-1. Analyze the context and question requirements
-2. Identify relevant codes and standards
-3. Consider equipment criticality factors
-4. Evaluate potential degradation mechanisms
-5. Formulate technical recommendation
-
-### instruction:
-{}
-
-### output:
-<think>
-{{REASONING}}
-</think>
-{{ANSWER}}"""
-
-# Sidebar file uploader
+# Sidebar configuration
 with st.sidebar:
+    st.header("Configuration")
+    hf_token = st.text_input("HuggingFace Token", type="password")
     st.header("Upload Documents")
     uploaded_file = st.file_uploader(
         "Choose a PDF or XLSX file",
@@ -55,117 +37,90 @@ if "messages" not in st.session_state:
 @st.cache_data
 def process_file(uploaded_file):
     file_content = ""
-
     try:
         if uploaded_file.type == "application/pdf":
             pdf_reader = PyPDF2.PdfReader(uploaded_file)
-            for page in pdf_reader.pages:
-                file_content += page.extract_text()
-
+            file_content = "\n".join([page.extract_text() for page in pdf_reader.pages])
         elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
            df = pd.read_excel(uploaded_file)
-            file_content = df.
-
+            file_content = df.to_markdown()
     except Exception as e:
         st.error(f"Error processing file: {str(e)}")
-        return None
-
     return file_content
 
-# Load model and tokenizer with
+# Load model and tokenizer with authentication
 @st.cache_resource
 def load_model():
-    model_name = "amiguel/optimizedModelListing6.1"
-
     try:
-        tokenizer = AutoTokenizer.from_pretrained(
+        tokenizer = AutoTokenizer.from_pretrained(
+            MODEL_NAME,
+            token=hf_token or True
+        )
         model = AutoModelForCausalLM.from_pretrained(
-
+            MODEL_NAME,
             device_map="auto",
             torch_dtype=torch.float16,
-
+            token=hf_token or True
         )
         return model, tokenizer
     except Exception as e:
-        st.error(f"
+        st.error(f"Model loading failed: {str(e)}")
         return None, None
 
 model, tokenizer = load_model()
 
 # Display chat messages
 for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-
-            st.markdown(message["content"]["answer"])
-            with st.expander("View Reasoning Process"):
-                st.markdown(message["content"]["reasoning"])
-        else:
-            st.markdown(message["content"])
+    with st.chat_message(message["role"], avatar="🧑🏻" if message["role"] == "user" else "🤖"):
+        st.markdown(message["content"])
 
 # Chat input
 if prompt := st.chat_input("Ask your inspection question..."):
     # Add user message to chat history
+    with st.chat_message("user", avatar="🧑🏻"):
+        st.markdown(prompt)
     st.session_state.messages.append({"role": "user", "content": prompt})
-
-    #
-    file_context = ""
-    if uploaded_file is not None:
-        file_context = process_file(uploaded_file)
+
+    # Prepare context
+    file_context = process_file(uploaded_file) if uploaded_file else ""
 
     # Generate response
     if model and tokenizer:
-        with st.chat_message("assistant"):
-            ...
-            # Display response
-            with st.expander("Reasoning Process (Click to view)", expanded=False):
-                st.markdown(f"🔍 **Analysis Steps:**\n{reasoning}")
-
-            st.markdown(f"📝 **Expert Recommendation:**\n{answer}")
-
-            # Add to chat history
-            st.session_state.messages.append({
-                "role": "assistant",
-                "content": {
-                    "answer": answer,
-                    "reasoning": reasoning
-                }
-            })
-
-        except Exception as e:
-            st.error(f"Generation error: {str(e)}")
+        with st.chat_message("assistant", avatar="🤖"):
+            # Prepare prompt template
+            full_prompt = f"""You are an expert inspection engineer. Analyze this context:
+{file_context}
+
+Question: {prompt}
+Answer:"""
+
+            # Create streamer
+            streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+
+            # Tokenize input
+            inputs = tokenizer(
+                full_prompt,
+                return_tensors="pt",
+                max_length=4096,
+                truncation=True
+            ).to(model.device)
+
+            # Start generation thread
+            generation_kwargs = dict(
+                inputs,
+                streamer=streamer,
+                max_new_tokens=1024,
+                temperature=0.7,
+                top_p=0.9,
+                repetition_penalty=1.1
+            )
+            thread = Thread(target=model.generate, kwargs=generation_kwargs)
+            thread.start()
+
+            # Stream response
+            response = st.write_stream(streamer)
+
+            # Add to chat history
+            st.session_state.messages.append({"role": "assistant", "content": response})
     else:
-        st.error("Model not loaded
+        st.error("Model not loaded - check configuration")
```