Ani14 committed on
Commit 2a354bb · verified
1 Parent(s): c8eaa55

Update app.py

Files changed (1)
  1. app.py +47 -23
app.py CHANGED
@@ -295,6 +295,27 @@ Citations:
 
     except Exception as e:
         st.error(f"❌ Error: {e}")
+# --- Build Full Context (Research + Thread + Methodology) ---
+def build_full_context():
+    full_context = ""
+
+    # Add Research Report
+    if st.session_state.get("last_report"):
+        full_context += f"=== Research Report ===\n{st.session_state['last_report']}\n\n"
+
+    # Add Thread Messages
+    if st.session_state.get("current_thread_id"):
+        thread_msgs = st.session_state.chat_threads.get(st.session_state.current_thread_id, [])
+        for msg in thread_msgs:
+            who = "User" if msg["role"] == "user" else "Assistant"
+            full_context += f"{who}: {msg['content']}\n\n"
+
+    # Add Methodology if available
+    if st.session_state.get("methodology_notes"):
+        full_context += f"=== Methodology Suggestions ===\n{st.session_state['methodology_notes']}\n\n"
+
+    return full_context
+
 # --- Chat Threads Section ---
 st.divider()
 st.subheader("📂 Your Research Threads")
@@ -329,11 +350,14 @@ st.divider()
 st.subheader("🧪 Methodology Recommender")
 
 if st.button("🧠 Suggest Research Methodologies"):
-    if st.session_state.get("last_report"):
+    context = build_full_context()
+    if context:
         try:
             method_prompt = [
-                {"role": "system", "content": "You are a research advisor. Based on the following research report, suggest suitable research methodologies (quantitative, qualitative, ML/AI techniques, etc.). Be concise and give bullet-point suggestions."},
-                {"role": "user", "content": st.session_state["last_report"]}
+                {"role": "system", "content": "You are a research advisor."},
+                {"role": "user", "content": f"""Given the following conversation, research report, and context, suggest a very detailed and customized research methodology that matches the research objectives discussed.
+
+\"\"\"{context}\"\"\""""}
             ]
             method_output = ""
             method_box = st.empty()
@@ -346,41 +370,41 @@ if st.button("🧠 Suggest Research Methodologies"):
         except Exception as e:
             st.error(f"❌ Methodology suggestion failed: {e}")
     else:
-        st.warning("⚠️ No research report available. Please generate research first.")
+        st.warning("⚠️ No research context available. Please generate research first.")
 
-# --- Follow-up Q&A (Contextual to Report + Methodology) ---
+# --- Follow-up Q&A (Contextual to Full Thread) ---
 st.divider()
 st.subheader("💬 Follow-up Q&A")
 
 followup = st.text_input("Ask a follow-up question:", key="follow_up_input")
 
 if st.button("Ask Follow-up"):
-    if followup:
+    context = build_full_context()
+    if followup and context:
         try:
-            context_intro = (
-                "Below is a research report followed by methodology suggestions.\n"
-                "Use both to answer the user's follow-up question."
-            )
-            combined_context = f"{context_intro}\n\n=== Report ===\n{st.session_state['last_report']}\n\n=== Methodology ===\n{st.session_state['methodology_notes']}"
-
-            chat = st.session_state.chat_history + [
-                {"role": "system", "content": "You are an academic research assistant."},
-                {"role": "user", "content": combined_context},
-                {"role": "user", "content": followup}
+            combined_prompt = [
+                {"role": "system", "content": "You are an expert academic research assistant."},
+                {"role": "user", "content": f"""Use ONLY the following research report, conversation, and methodology suggestions to answer the follow-up question below. Stay fully topic-specific and context-aware.
+
+\"\"\"{context}\"\"\"
+
+Follow-up Question: {followup}
+"""}
             ]
 
             response = ""
-            for chunk in call_llm(chat, max_tokens=1500):
-                response += chunk
+            with st.chat_message("assistant"):
+                for chunk in call_llm(combined_prompt, max_tokens=2000):
+                    response += chunk
+                    st.markdown(response)
 
             st.session_state.chat_history.append({"role": "user", "content": followup})
            st.session_state.chat_history.append({"role": "assistant", "content": response})
 
-            with st.chat_message("assistant"):
-                st.markdown(response)
-
         except Exception as e:
-            st.error(f"Follow-up error: {e}")
+            st.error(f"❌ Follow-up error: {e}")
+    else:
+        st.warning("⚠️ No sufficient context available. Please generate research first.")
 
 # --- Paper Upload for Review & Improvement ---
 st.divider()
@@ -433,4 +457,4 @@ st.subheader("📜 Full Chat History")
 with st.expander("View Chat History", expanded=False):
     for msg in st.session_state.chat_history:
         with st.chat_message(msg["role"] if msg["role"] in ["user", "assistant"] else "assistant"):
-            st.markdown(msg["content"])
+            st.markdown(msg["content"])
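
A minimal standalone sketch of the context-assembly pattern this commit introduces (not part of the commit itself): assemble_context and the plain demo_state dict below are hypothetical stand-ins for build_full_context() and st.session_state, showing how the report, thread messages, and methodology notes are concatenated into the prompt context.

# Hypothetical sketch; a plain dict stands in for st.session_state.
def assemble_context(state: dict) -> str:
    parts = []
    # Research report, if one has been generated
    if state.get("last_report"):
        parts.append(f"=== Research Report ===\n{state['last_report']}")
    # Messages from the active thread, if any
    thread_id = state.get("current_thread_id")
    for msg in state.get("chat_threads", {}).get(thread_id, []):
        who = "User" if msg["role"] == "user" else "Assistant"
        parts.append(f"{who}: {msg['content']}")
    # Methodology notes, if already produced
    if state.get("methodology_notes"):
        parts.append(f"=== Methodology Suggestions ===\n{state['methodology_notes']}")
    return "\n\n".join(parts)

demo_state = {
    "last_report": "Scope, related work, and findings...",
    "current_thread_id": "t1",
    "chat_threads": {"t1": [{"role": "user", "content": "Narrow the scope to edge devices"}]},
}
print(assemble_context(demo_state))  # an empty result means "generate research first"

An empty string signals that no context exists yet, which is why both the methodology and follow-up buttons in the diff fall through to their "Please generate research first" warnings.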