0ndr3 committed · verified
Commit a95a961 · 1 Parent(s): 6130cf1

Update app.py

Files changed (1)
  1. app.py  +45 -14
app.py CHANGED
@@ -88,7 +88,7 @@ def load_live_alerts():
         approx_value_at_win = tickets_sold * ticket_price
 
         content = (
-            f"Raffle {num} | Prize: {['prize_name']} | Value: £{val:.2f}"
+            f"Raffle {num} | Prize: {row['prize_name']} | Value: £{val:.2f}"
             f"Single ticket price: £{ticket_price:.2f} | Won @ {date}"
             f"Won at tickets sold:"
             f"qty: {tickets_sold}"
@@ -103,7 +103,7 @@ def load_live_alerts():
             "value": val,
             "tickets_sold": tickets_sold,
             "percent_path": pct,
-            "ticket_price": float(row["ticket_price"]),
+            "ticket_price": ticket_price,
             "approx_value_at_win": approx_value_at_win,
             "timestamp": ts_iso,
             "source": "recent and live"
@@ -113,8 +113,8 @@ def load_live_alerts():
 
 # ─── 3) Retriever ───────────────────────────────────────────────────────────────
 
-db = build_chroma_db()
-live_docs = load_live_alerts()
+db = build_chroma_db()
+live_docs = load_live_alerts()
 
 def combined_docs(q: str):
     hist = db.similarity_search(q, k=8)
@@ -129,13 +129,10 @@ You are **Rafael The Raffler**, a calm friendly expert in instant-win raffle ana
 If asked “what do you do?”, give a bullet list of your strengths (raffle timing, value insights, patterns).
 Reasoning Rules:
 1. **Interpreting “When”:** Whenever the user asks “When…?”, interpret that as “At what tickets-sold count and percent did the prize win occur?” Do *not* give calendar dates or times.
-
 --- Conversation So Far ---
 {chat_history}
-
 --- Raffle Data ---
 {context}
-
 --- Question ---
 {question}
 """
@@ -156,23 +153,57 @@ def filter_docs(inputs):
     docs = [d for d in docs if d.metadata["value"] > thr]
     return {"documents":docs, "question":q}
 
-# ─── 5) RAG + ChatGroq Chain ────────────────────────────────────────────────────
+# ─── Follow-up Question Rewriting ───────────────────────────────────────────────
+
+# This template will turn "How many big prizes...?" into
+# "In raffle 86, how many big prizes in total were won?"
+question_rewrite_template = PromptTemplate(
+    input_variables=["chat_history","question"],
+    template="""
+Rewrite the following user query to be a fully self-contained question, given the conversation so far.
+
+Conversation:
+{chat_history}
+
+Follow-up:
+{question}
+
+Rewritten standalone question:"""
+)
+
+rewrite_chain = (
+    # bundle history + raw question
+    RunnableLambda(lambda q: {
+        "chat_history": memory.load_memory_variables({})["chat_history"],
+        "question": q
+    })
+    # build the rewrite prompt
+    | RunnableLambda(lambda inp: question_rewrite_template.format(**inp))
+    # call the LLM to rewrite
+    | ChatGroq(api_key=GROQ_API_KEY, model="llama3-8b-8192")
+    | StrOutputParser()
+)
+
+# ─── 5) RAG + ChatGroq Chain (with rewrite) ────────────────────────────────────
 
 retrieval_chain = (
-    RunnableMap({
-        "documents": lambda x: combined_docs(x if isinstance(x,str) else x["question"]),
-        "question": lambda x: x if isinstance(x,str) else x["question"]
+    # 1) Rewrite the question first
+    rewrite_chain
+    # 2) Retrieve docs against the rewritten question
+    | RunnableMap({
+        "documents": lambda rewritten_q: combined_docs(rewritten_q),
+        "question": lambda rewritten_q: rewritten_q
     })
     | RunnableLambda(filter_docs)
+    # 3) Build final inputs and truncate history
     | RunnableLambda(lambda d: {
-        # only keep the last 4 lines of chat_history
         "chat_history": "\n".join(
-            memory.load_memory_variables({})["chat_history"]
-            .splitlines()[-4:]
+            memory.load_memory_variables({})["chat_history"].splitlines()[-4:]
        ),
        "context": "\n".join(doc.page_content for doc in d["documents"]),
        "question": d["question"]
    })
+    # 4) Format final prompt and call LLM
    | RunnableLambda(lambda inp: prompt.format(**inp))
    | ChatGroq(api_key=GROQ_API_KEY, model="llama3-8b-8192")
    | StrOutputParser()
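
For orientation, a minimal usage sketch of the updated pipeline follows. It is not part of the commit: the import of memory and retrieval_chain from app.py, the seeded history, and the example question are illustrative assumptions. The sketch relies only on what the diff shows, namely that retrieval_chain now begins with rewrite_chain and that memory.load_memory_variables({})["chat_history"] yields a plain string.

# Hypothetical usage sketch (not part of the commit). Assumes app.py exposes
# `memory` and `retrieval_chain` at module level, that `memory` is a LangChain
# buffer memory supporting save_context, and that invoking the chain with a
# raw string is the intended entry point.
from app import memory, retrieval_chain

# Seed the conversation so the follow-up below is ambiguous on its own.
memory.save_context(
    {"input": "Tell me about raffle 86"},
    {"output": "Here is what I know about raffle 86: ..."},
)

follow_up = "How many big prizes were won in total?"

# rewrite_chain runs first inside retrieval_chain, so the bare follow-up is
# turned into a standalone question (per the template, something like
# "In raffle 86, how many big prizes in total were won?") before retrieval,
# filtering, and the final ChatGroq call.
answer = retrieval_chain.invoke(follow_up)

# Record the turn so the next follow-up can be rewritten with this context.
memory.save_context({"input": follow_up}, {"output": answer})
print(answer)

Keeping the rewrite inside the chain means callers still pass the raw user question unchanged; the trade-off is one extra ChatGroq call per turn.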