Nischal Subedi committed
Commit 610dd7d · 1 Parent(s): 4d1e834
Files changed (1): app.py (+82, -0)

app.py CHANGED
@@ -89,6 +89,71 @@ Answer:"""
         logging.info("No statutes found matching the pattern in the context.")
         return "No specific statutes found in the provided context."
 
+    def format_llm_response_for_html(self, text: str) -> str:
+        """
+        Formats the LLM's raw text response for display in a Gradio HTML component,
+        converting Markdown-like elements to HTML. Handles bolding, paragraphs, and lists.
+        """
+        if not text:
+            return ""
+
+        # 1. Convert markdown bolding (**text**) to HTML <b>text</b>
+        formatted_text = re.sub(r'\*\*(.*?)\*\*', r'<b>\1</b>', text)
+
+        # Split the text into blocks based on two or more consecutive newlines.
+        # This separates paragraphs and distinct list blocks.
+        blocks = re.split(r'\n\s*\n+', formatted_text.strip())
+
+        html_output_parts = []
+
+        for block in blocks:
+            block = block.strip()
+            if not block:
+                continue
+
+            # Check if the block is a list (starts with a list marker on its first non-empty line).
+            list_lines = block.split('\n')
+
+            is_list = False
+            list_type = None  # 'ul' or 'ol'
+
+            for line in list_lines:
+                stripped_line = line.strip()
+                if stripped_line:  # Check first non-empty line
+                    if re.match(r'^[*-]\s*(.*)', stripped_line):
+                        is_list = True
+                        list_type = 'ul'
+                    elif re.match(r'^\d+\.\s*(.*)', stripped_line):
+                        is_list = True
+                        list_type = 'ol'
+                    break  # Only need to check the first actual line
+
+            if is_list:
+                # Process as a list
+                list_html = f'<{list_type}>'
+                for line in list_lines:
+                    stripped_line = line.strip()
+                    if not stripped_line:
+                        continue  # Skip empty lines within a list block
+
+                    # Extract content after the list marker
+                    if list_type == 'ul':
+                        item_content = re.sub(r'^[*-]\s*', '', stripped_line)
+                    else:  # ol
+                        item_content = re.sub(r'^\d+\.\s*', '', stripped_line)
+
+                    list_html += f'<li>{item_content.strip()}</li>'
+                list_html += f'</{list_type}>'
+                html_output_parts.append(list_html)
+            else:
+                # Process as a paragraph
+                # Replace single newlines within the paragraph block with <br>
+                paragraph_content = block.replace('\n', '<br>')
+                html_output_parts.append(f'<p>{paragraph_content}</p>')
+
+        return ''.join(html_output_parts)
+
+
     @lru_cache(maxsize=50)
     def process_query_cached(self, query: str, state: str, openai_api_key: str, n_results: int = 5) -> Dict[str, any]:
         logging.info(f"Processing query (cache key: '{query}'|'{state}'|key_hidden) with n_results={n_results}")
 
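Note: a quick, hypothetical sketch of what the new formatter emits for a typical LLM answer. `bot` stands in for an instance of the class this method is defined on (the class itself is outside this hunk), and the sample text is invented:

    raw = (
        "**Security deposits**\n"
        "\n"
        "Landlords must:\n"
        "\n"
        "1. Return the deposit within 30 days\n"
        "2. Provide an itemized list of deductions"
    )
    html = bot.format_llm_response_for_html(raw)  # `bot` is a hypothetical instance
    # html == ('<p><b>Security deposits</b></p>'
    #          '<p>Landlords must:</p>'
    #          '<ol><li>Return the deposit within 30 days</li>'
    #          '<li>Provide an itemized list of deductions</li></ol>')
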
@@ -176,6 +241,8 @@ Answer:"""
             answer_text = "<div class='error-message'><span class='error-icon'>⚠️</span>The AI model returned an empty response. This might be due to the query, context limitations, or temporary issues. Please try rephrasing your question or try again later.</div>"
         else:
             logging.info("LLM generated answer successfully.")
+            # Apply the formatting function here to convert Markdown-like syntax to HTML
+            answer_text = self.format_llm_response_for_html(answer_text)
 
         return {"answer": answer_text, "context_used": context}
 
 
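Note: assuming this return is the one `process_query_cached` memoizes, formatting before returning means `lru_cache` stores the finished HTML rather than raw Markdown, so a repeated query skips both the LLM call and the re-formatting. A hypothetical illustration (`bot` and `api_key` are stand-ins):

    first = bot.process_query_cached("Can my landlord keep my deposit?", "CA", api_key)
    again = bot.process_query_cached("Can my landlord keep my deposit?", "CA", api_key)
    assert first["answer"] == again["answer"]  # second call served from the cache, already HTML
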
@@ -636,6 +703,21 @@ Answer:"""
     color: var(--text-primary);
     line-height: 1.7;
 }
+/* Custom CSS for lists and paragraphs within response-content for better spacing */
+.response-content p {
+    margin-bottom: 1em; /* Spacing between paragraphs */
+}
+.response-content ul, .response-content ol {
+    margin-bottom: 1em; /* Spacing after lists */
+    padding-left: 2em; /* Indent lists */
+}
+.response-content li {
+    margin-bottom: 0.5em; /* Spacing between list items */
+}
+.response-content li:last-child {
+    margin-bottom: 0;
+}
+
 
 /* Error message styles */
 .error-message {
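
Note: these rules only take effect if the formatted answer is rendered inside an element carrying the `response-content` class; the wrapper itself is outside this diff. A hypothetical sketch of wrapping the formatter's output for the Gradio HTML component:

    # `answer_html` is the string produced by format_llm_response_for_html;
    # the actual container markup used by the app is not shown in this diff.
    wrapped = f"<div class='response-content'>{answer_html}</div>"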