Ani14 committed on
Commit
c8eaa55
Β·
verified Β·
1 Parent(s): bd38cc6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -38
app.py CHANGED
@@ -299,29 +299,23 @@ Citations:
299
  st.divider()
300
  st.subheader("πŸ“‚ Your Research Threads")
301
 
302
- user_avatar = "https://cdn-icons-png.flaticon.com/512/9131/9131529.png"
303
- assistant_avatar = "https://cdn-icons-png.flaticon.com/512/4712/4712107.png"
304
-
305
  for tid, chats in st.session_state.chat_threads.items():
306
- with st.container():
307
- with st.expander(f"🧡 Thread {tid[:8]}", expanded=False):
308
- for msg in chats:
309
- avatar = user_avatar if msg['role'] == 'user' else assistant_avatar
310
- bubble_color = "#DCF8C6" if msg['role'] == 'user' else "#F0F0F0"
311
- align = "flex-end" if msg['role'] == 'user' else "flex-start"
312
- st.markdown({msg['content']})
313
- followup = st.text_input(f"πŸ’¬ Continue Thread {tid[:8]}:", key=f"followup_{tid}")
314
- if st.button(f"Ask Follow-up {tid}", key=f"button_{tid}"):
315
- if followup:
316
- with st.spinner("πŸ€– Assistant is typing..."):
317
- response = ""
318
- for chunk in call_llm(st.session_state.chat_threads[tid] + [{"role": "user", "content": followup}], max_tokens=2000):
319
- response += chunk
320
- st.markdown(response)
321
- st.session_state.chat_threads[tid].append({"role": "user", "content": followup})
322
- st.session_state.chat_threads[tid].append({"role": "assistant", "content": response})
323
- save_session_data()
324
- st.rerun()
325
 
326
  # --- Download All Threads Section ---
327
  if st.session_state.chat_threads:
@@ -335,7 +329,7 @@ st.divider()
335
  st.subheader("πŸ§ͺ Methodology Recommender")
336
 
337
  if st.button("🧠 Suggest Research Methodologies"):
338
- if st.session_state["last_report"]:
339
  try:
340
  method_prompt = [
341
  {"role": "system", "content": "You are a research advisor. Based on the following research report, suggest suitable research methodologies (quantitative, qualitative, ML/AI techniques, etc.). Be concise and give bullet-point suggestions."},
@@ -352,7 +346,7 @@ if st.button("🧠 Suggest Research Methodologies"):
352
  except Exception as e:
353
  st.error(f"❌ Methodology suggestion failed: {e}")
354
  else:
355
- st.warning("⚠️ Generate the research report first.")
356
 
357
  # --- Follow-up Q&A (Contextual to Report + Methodology) ---
358
  st.divider()
@@ -360,7 +354,7 @@ st.subheader("πŸ’¬ Follow-up Q&A")
360
 
361
  followup = st.text_input("Ask a follow-up question:", key="follow_up_input")
362
 
363
- if st.button("Ask"):
364
  if followup:
365
  try:
366
  context_intro = (
@@ -382,7 +376,8 @@ if st.button("Ask"):
382
  st.session_state.chat_history.append({"role": "user", "content": followup})
383
  st.session_state.chat_history.append({"role": "assistant", "content": response})
384
 
385
- st.markdown(response)
 
386
 
387
  except Exception as e:
388
  st.error(f"Follow-up error: {e}")
@@ -393,7 +388,7 @@ st.subheader("πŸ“€ Upload Your Paper for Feedback")
393
 
394
  uploaded_file = st.file_uploader("Upload your research paper (.pdf or .txt)", type=["pdf", "txt"])
395
 
396
- if uploaded_file and st.button("🧠 Analyze and Suggest Improvements"):
397
  try:
398
  def extract_text_from_file(file):
399
  if file.name.endswith(".pdf"):
@@ -421,21 +416,21 @@ Be honest and constructive. Here's the full text:
421
  \"\"\"{paper_text}\"\"\""""}
422
  ]
423
 
424
- st.status("πŸ”Ž Analyzing your paper...")
425
- improvement_output = ""
426
- feedback_box = st.empty()
427
- for chunk in call_llm(feedback_prompt, max_tokens=2500):
428
- improvement_output += chunk
429
- feedback_box.markdown(improvement_output, unsafe_allow_html=True)
430
 
431
  except Exception as e:
432
  st.error(f"❌ Error while analyzing paper: {e}")
433
 
434
-
435
  # --- Full Chat History Viewer ---
436
  st.divider()
437
- with st.expander("πŸ“œ View Full Chat History", expanded=False):
 
 
438
  for msg in st.session_state.chat_history:
439
- role = msg["role"]
440
- prefix = "πŸ‘€ You" if role == "user" else "πŸ€– Assistant"
441
- st.markdown(f"**{prefix}:** {msg['content']}")
 
299
  st.divider()
300
  st.subheader("πŸ“‚ Your Research Threads")
301
 
 
 
 
302
  for tid, chats in st.session_state.chat_threads.items():
303
+ with st.expander(f"🧡 Thread {tid[:8]}", expanded=False):
304
+ for msg in chats:
305
+ with st.chat_message(msg["role"] if msg["role"] in ["user", "assistant"] else "assistant"):
306
+ st.markdown(msg["content"])
307
+
308
+ followup = st.text_input(f"πŸ’¬ Continue Thread {tid[:8]}:", key=f"followup_{tid}")
309
+ if st.button(f"Ask Follow-up {tid}", key=f"button_{tid}"):
310
+ if followup:
311
+ with st.spinner("πŸ€– Assistant is typing..."):
312
+ response = ""
313
+ for chunk in call_llm(st.session_state.chat_threads[tid] + [{"role": "user", "content": followup}], max_tokens=2000):
314
+ response += chunk
315
+ st.session_state.chat_threads[tid].append({"role": "user", "content": followup})
316
+ st.session_state.chat_threads[tid].append({"role": "assistant", "content": response})
317
+ save_session_data()
318
+ st.rerun()
 
 
 
319
 
320
  # --- Download All Threads Section ---
321
  if st.session_state.chat_threads:
 
329
  st.subheader("πŸ§ͺ Methodology Recommender")
330
 
331
  if st.button("🧠 Suggest Research Methodologies"):
332
+ if st.session_state.get("last_report"):
333
  try:
334
  method_prompt = [
335
  {"role": "system", "content": "You are a research advisor. Based on the following research report, suggest suitable research methodologies (quantitative, qualitative, ML/AI techniques, etc.). Be concise and give bullet-point suggestions."},
 
346
  except Exception as e:
347
  st.error(f"❌ Methodology suggestion failed: {e}")
348
  else:
349
+ st.warning("⚠️ No research report available. Please generate research first.")
350
 
351
  # --- Follow-up Q&A (Contextual to Report + Methodology) ---
352
  st.divider()
 
354
 
355
  followup = st.text_input("Ask a follow-up question:", key="follow_up_input")
356
 
357
+ if st.button("Ask Follow-up"):
358
  if followup:
359
  try:
360
  context_intro = (
 
376
  st.session_state.chat_history.append({"role": "user", "content": followup})
377
  st.session_state.chat_history.append({"role": "assistant", "content": response})
378
 
379
+ with st.chat_message("assistant"):
380
+ st.markdown(response)
381
 
382
  except Exception as e:
383
  st.error(f"Follow-up error: {e}")
 
388
 
389
  uploaded_file = st.file_uploader("Upload your research paper (.pdf or .txt)", type=["pdf", "txt"])
390
 
391
+ if uploaded_file and st.button("🧠 Analyze Paper for Improvements"):
392
  try:
393
  def extract_text_from_file(file):
394
  if file.name.endswith(".pdf"):
 
416
  \"\"\"{paper_text}\"\"\""""}
417
  ]
418
 
419
+ with st.status("πŸ”Ž Analyzing your paper..."):
420
+ improvement_output = ""
421
+ feedback_box = st.empty()
422
+ for chunk in call_llm(feedback_prompt, max_tokens=2500):
423
+ improvement_output += chunk
424
+ feedback_box.markdown(improvement_output, unsafe_allow_html=True)
425
 
426
  except Exception as e:
427
  st.error(f"❌ Error while analyzing paper: {e}")
428
 
 
429
  # --- Full Chat History Viewer ---
430
  st.divider()
431
+ st.subheader("πŸ“œ Full Chat History")
432
+
433
+ with st.expander("View Chat History", expanded=False):
434
  for msg in st.session_state.chat_history:
435
+ with st.chat_message(msg["role"] if msg["role"] in ["user", "assistant"] else "assistant"):
436
+ st.markdown(msg["content"])