Spaces:
Update app.py
app.py
CHANGED
@@ -1,685 +1,320 @@
-# app.py
-# 🎁 GIfty+ — Smart Gift Recommender (original Spaces app + Hybrid Ranker v2)
-# Dataset default: Danielos100/Amazon_products_clean (override via DATASET_ID env)
-# Retrieval: sentence-transformers/all-MiniLM-L12-v2 (cosine on budget-filtered subset)
-# DIY: FLAN-only (strict prompts + sanitizers)
-# Message: FLAN-based, with a template fallback when generation fails
-# UI: examples table at the top (click-to-fill), always-open form, no JSON output
-
import os, re, json, hashlib, pathlib, random
from typing import Dict, List, Tuple, Optional, Any
-
-import numpy as np
-import pandas as pd
-import gradio as gr
from datasets import load_dataset
-
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-import torch
-
-print(f"===== Application Startup at {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')} =====")

-# --------------------- Config ---------------------
TITLE = "# 🎁 GIftyPlus - Smart Gift Recommender\n*Top-3 catalog picks + 1 DIY gift + personalized message*"
-
-DATASET_ID = os.getenv("DATASET_ID", "Danielos100/Amazon_products_clean")
DATASET_SPLIT = os.getenv("DATASET_SPLIT", "train")
-
MAX_ROWS = int(os.getenv("MAX_ROWS", "12000"))
EMBED_MODEL_ID = os.getenv("EMBED_MODEL_ID", "sentence-transformers/all-MiniLM-L12-v2")

-def resolve_cache_dir():
-    for p in (os.getenv("EMBED_CACHE_DIR"), os.path.join(os.getcwd(), ".gifty_cache"), "/tmp/.gifty_cache"):
-        try:
-            pathlib.Path(p).mkdir(parents=True, exist_ok=True)
-            test = pathlib.Path(p) / ".write_test"
-            with open(test, "w") as f: f.write("ok")
-            test.unlink(missing_ok=True)
-            return p
-        except Exception:
-            continue
    return os.getcwd()
-
EMBED_CACHE_DIR = resolve_cache_dir()
-print(f"[CACHE] Using EMBED_CACHE_DIR={EMBED_CACHE_DIR}")

-INTEREST_OPTIONS = [
-    "Sports","Travel","Cooking","Technology","Music","Art","Reading","Gardening","Fashion","Gaming",
-    "Photography","Hiking","Movies","Crafts","Pets","Wellness","Collecting","Food","Home decor","Science",
-]
-OCCASION_UI = [
-    "Birthday","Wedding / Engagement","Anniversary","Graduation","New baby","Housewarming",
-    "Retirement","Holidays","Valentine’s Day","Promotion / New job","Get well soon"
-]
-OCCASION_CANON = {
-    "Birthday":"birthday","Wedding / Engagement":"wedding","Anniversary":"anniversary",
-    "Graduation":"graduation","New baby":"new_baby","Housewarming":"housewarming",
-    "Retirement":"retirement","Holidays":"holidays","Valentine’s Day":"valentines",
-    "Promotion / New job":"promotion","Get well soon":"get_well"
-}
-RECIPIENT_RELATIONSHIPS = [
-    "Family - Parent","Family - Sibling","Family - Child","Family - Other relative",
-    "Friend","Colleague","Boss","Romantic partner","Teacher / Mentor","Neighbor","Client / Business partner",
-]
-MESSAGE_TONES = [
-    "Formal","Casual","Funny","Heartfelt","Inspirational","Playful","Romantic","Appreciative","Encouraging",
-]
AGE_OPTIONS = {"any":"any","kid (3–12)":"kids","teen (13–17)":"teens","adult (18–64)":"adult","senior (65+)":"senior"}
GENDER_OPTIONS = ["any","female","male","nonbinary"]

-SYNONYMS = {
-    "sports":["fitness","outdoor","training","yoga","run"],
-    "travel":["luggage","passport","map","trip","vacation"],
-    "cooking":["kitchen","cookware","chef","baking"],
-    "technology":["electronics","gadgets","device","smart","computer"],
-    "music":["audio","headphones","earbuds","speaker","vinyl"],
-    "art":["painting","drawing","sketch","canvas"],
-    "reading":["book","novel","literature"],
-    "gardening":["plants","planter","seeds","garden","indoor"],
-    "fashion":["style","accessory","jewelry"],
-    "gaming":["board game","puzzle","video game","controller"],
-    "photography":["camera","lens","tripod","film"],
-    "hiking":["outdoor","camping","backpack","trek"],
-    "movies":["film","cinema","blu-ray","poster"],
-    "crafts":["diy","handmade","kit","knitting"],
-    "pets":["dog","cat","pet"],
-    "wellness":["relaxation","spa","aromatherapy","self-care"],
-    "collecting":["display","collector","limited edition"],
-    "food":["gourmet","snack","treats","chocolate"],
-    "home decor":["home","decor","wall art","candle"],
-    "science":["lab","experiment","STEM","microscope"],
-}
-REL_TO_TOKENS = {
-    "Family - Parent": ["parent","family"],
-    "Family - Sibling": ["sibling","family"],
-    "Family - Child": ["kids","play","family"],
-    "Family - Other relative": ["family","relative"],
-    "Friend": ["friendly"],
-    "Colleague": ["office","work","professional"],
-    "Boss": ["executive","professional","premium"],
-    "Romantic partner": ["romantic","couple"],
-    "Teacher / Mentor": ["teacher","mentor","thank_you"],
-    "Neighbor": ["neighbor","housewarming"],
-    "Client / Business partner": ["professional","thank_you","premium"],
-}
-
-# --------------------- Data loading & schema ---------------------
-_CURRENCY_RE = re.compile(r"[^\d.,\-]+")
-_NUM_RE = re.compile(r"(\d+(?:[.,]\d+)?)")
-_RANGE_SEP = re.compile(r"\s*(?:-|–|—|to)\s*")

-def _to_price_usd(x):
    if pd.isna(x): return np.nan
    s = str(x).strip().lower()
-    if _RANGE_SEP.search(s):
-        s = _RANGE_SEP.split(s)[0]
-    s = _CURRENCY_RE.sub(" ", s)
-    m = _NUM_RE.search(s.replace(",", "."))
-    try:
-        return float(m.group(1)) if m else np.nan
-    except Exception:
-        return np.nan

-def _first_present(df, cands):
-    for c in cands:
        if c in df.columns: return c
    return None

-def _auto_price_col(df):
    for c in df.columns:
        s = df[c]
-        if pd.api.types.is_numeric_dtype(s):
-            nonneg = s.dropna()
-            if not nonneg.empty and (nonneg.between(0.5, 10000)).mean() > 0.6:
-                return c
    for c in df.columns:
-        if sample.mean() > 0.5:
-            return c
    return None

-def map_amazon_to_schema(df_raw: pd.DataFrame) -> pd.DataFrame:
-    out = pd.DataFrame({
-        "name": df_raw.get(name_c, pd.Series("", index=df_raw.index)),
-        "short_desc": df_raw.get(desc_c, pd.Series("", index=df_raw.index)),
-        "tags": df_raw.get(cat_c, pd.Series("", index=df_raw.index)),
-        "price_usd": df_raw.get(price_c, pd.Series(np.nan, index=df_raw.index)).map(_to_price_usd),
-        "image_url": df_raw.get(img_c, pd.Series("", index=df_raw.index)),
-    })
-    out["name"] = out["name"].astype(str).str.strip().str.slice(0, 160)
-    out["short_desc"] = out["short_desc"].astype(str).str.strip().str.slice(0, 600)
-    out["tags"] = out["tags"].astype(str).str.replace("|", ", ").str.lower()
-    return out

-def extract_top_cat(tags: str) -> str:
    return s.strip().split(",")[0] if s else ""

-def load_catalog() -> pd.DataFrame:
-    if len(df) > MAX_ROWS:
-        df = df.sample(n=MAX_ROWS, random_state=42).reset_index(drop=True)
-    df["doc"] = (df["name"].fillna("") + " | " + df["tags"].fillna("") + " | " + df["short_desc"].fillna("")).str.strip()
-    df["top_cat"] = df["tags"].map(extract_top_cat)
-    df["blob"] = (df["name"].fillna("") + " " + df["tags"].fillna("") + " " + df["short_desc"].fillna("")).str.lower()
-    print(f"[DATA] dataset={DATASET_ID} split={DATASET_SPLIT} rows_final={len(df)}")
    return df

-CATALOG = load_catalog()
-
-# --------------------- Embeddings (with dataset-aware cache) ---------------------
class EmbeddingBank:
-    def __init__(self, docs, model_id, dataset_tag):
-        if os.path.exists(path):
-            try:
-                embs = np.load(path, mmap_mode="r")
-                if embs.shape[0] == len(docs):
-                    print("[EMB] mmap-loaded from cache")
-                    return embs
-            except Exception:
-                pass
-        print("[EMB] Building embeddings…")
-        embs = self.model.encode(docs, convert_to_numpy=True, normalize_embeddings=True, show_progress_bar=True)
-        try:
-            np.save(self._cache_path(len(docs)), embs)
-            embs = np.load(self._cache_path(len(docs)), mmap_mode="r")
-            print(f"[EMB] Saved & mmap-loaded: {embs.shape}")
-        except Exception:
-            print("[EMB] Cache save failed; using RAM only")
-        return embs

-    def query_vec(self, text: str) -> np.ndarray:
-        return self.model.encode([text], convert_to_numpy=True, normalize_embeddings=True)[0]

-EMB = EmbeddingBank(CATALOG["doc"].tolist(), EMBED_MODEL_ID, DATASET_ID)

-# ---- tokens per item for fast overlap (used by recommender bonuses) ----
_tok_rx = re.compile(r"[a-z0-9][a-z0-9\-']*")
-def _tok_set(text: str) -> set:
-    return set(_tok_rx.findall(str(text).lower()))
if "tok_set" not in CATALOG.columns:
-    CATALOG["tok_set"] = (
-        CATALOG["name"].fillna("") + " " +
-        CATALOG["tags"].fillna("") + " " +
-        CATALOG["short_desc"].fillna("")
-    ).map(_tok_set)

-
# ====================== Recommendations โ Hybrid Ranker v2 ======================
|
249 |
try:
|
250 |
from sentence_transformers import CrossEncoder
|
251 |
-
except
|
252 |
-
CrossEncoder
|
253 |
-
|
254 |
-
|
255 |
-
_CE_MODEL = None
|
256 |
-
|
257 |
def _load_cross_encoder():
|
258 |
global _CE_MODEL
|
259 |
-
if _CE_MODEL is not None:
|
260 |
-
|
261 |
-
if CrossEncoder is None:
|
262 |
-
return None
|
263 |
-
try:
|
264 |
-
_CE_MODEL = CrossEncoder(RERANK_MODEL_ID, device="cpu")
|
265 |
-
print(f"[RERANK] Loaded: {RERANK_MODEL_ID}")
|
266 |
-
except Exception as e:
|
267 |
-
print(f"[RERANK] Failed to load CE: {e}")
|
268 |
-
_CE_MODEL = None
|
269 |
return _CE_MODEL
|
270 |
|
271 |
-
OCCASION_PRIORS
|
272 |
-
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
def expand_with_synonyms(tokens: List[str]) -> List[str]:
|
285 |
-
out = []
|
286 |
for t in tokens:
|
287 |
-
t
|
288 |
-
if
|
289 |
-
out.append(t)
|
290 |
-
out.extend(SYNONYMS.get(t, []))
|
291 |
return out
|
292 |
|
293 |
-
def profile_to_query(
|
294 |
-
inter
|
295 |
-
expanded
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
|
300 |
-
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
if
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
if
|
324 |
-
|
325 |
-
|
326 |
-
|
327 |
-
if age == "kids":
|
328 |
-
return (is_kidsy | (~is_teen & is_kidsy)).to_numpy()
|
329 |
-
return np.ones(len(blob), dtype=bool)
|
330 |
-
|
331 |
-
def _interest_bonus(profile: Dict, idx: np.ndarray) -> np.ndarray:
|
332 |
-
ints = [i.lower() for i in profile.get("interests", []) if i]
|
333 |
-
syns = [s for it in ints for s in SYNONYMS.get(it, [])]
|
334 |
-
vocab = set(ints + syns)
|
335 |
-
if not vocab or idx.size == 0:
|
336 |
-
return np.zeros(len(idx), dtype="float32")
|
337 |
-
counts = np.array([len(CATALOG["tok_set"].iat[i] & vocab) for i in idx], dtype="float32")
|
338 |
-
counts = np.clip(counts, 0, 6)
|
339 |
-
return 0.10 * counts
|
340 |
-
|
341 |
-
def _occasion_bonus(idx: np.ndarray, occ_ui: str) -> np.ndarray:
|
342 |
-
slug = OCCASION_CANON.get(occ_ui or "Birthday", "birthday")
|
343 |
-
pri = OCCASION_PRIORS.get(slug, [])
|
344 |
-
if not pri or idx.size == 0:
|
345 |
-
return np.zeros(len(idx), dtype="float32")
|
346 |
-
bl = CATALOG["blob"].to_numpy()
|
347 |
-
out = np.zeros(len(idx), dtype="float32")
|
348 |
-
for j, i in enumerate(idx):
|
349 |
-
text = bl[i]
|
350 |
-
bonus = 0.0
|
351 |
-
for kw, w in pri:
|
352 |
-
if kw in text:
|
353 |
-
bonus += w
|
354 |
-
out[j] = min(bonus, 0.15)
|
355 |
return out
|
356 |
|
357 |
-
def _minmax(x:
|
358 |
-
if x.size
|
359 |
-
lo,
|
360 |
-
if hi
|
361 |
-
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
|
399 |
-
|
400 |
-
m_price2 = (CATALOG["price_usd"].values >= lo2) & (CATALOG["price_usd"].values <= hi2)
|
401 |
-
idx = np.where(m_price2 & m_gender_ok)[0]
|
402 |
-
if idx.size == 0:
|
403 |
-
cheapest = np.argsort(CATALOG["price_usd"].values)[:3]
|
404 |
-
res = CATALOG.iloc[cheapest].copy()
|
405 |
-
res["similarity"] = np.nan
|
406 |
-
return res[["name","short_desc","price_usd","image_url","similarity"]].reset_index(drop=True)
|
407 |
-
|
408 |
-
q = profile_to_query(profile)
|
409 |
-
qv = EMB.query_vec(q).astype("float32")
|
410 |
-
X = np.asarray(EMB.embs, dtype="float32")[idx]
|
411 |
-
emb_sims = X @ qv
|
412 |
-
|
413 |
-
target_price = (lo + hi)/2.0 if hi > lo else hi
|
414 |
-
prices = CATALOG.iloc[idx]["price_usd"].to_numpy()
|
415 |
-
price_bonus = np.clip(0.12 - np.abs(prices - target_price)/max(target_price,1.0), 0, 0.12).astype("float32")
|
416 |
-
|
417 |
-
int_bonus = _interest_bonus(profile, idx)
|
418 |
-
occ_bonus = _occasion_bonus(idx, profile.get("occ_ui","Birthday"))
|
419 |
-
|
420 |
-
pre_score = emb_sims + price_bonus + int_bonus + occ_bonus
|
421 |
-
|
422 |
-
K1 = min(64, idx.size)
|
423 |
-
top_local = np.argpartition(-pre_score, K1-1)[:K1]
|
424 |
-
cand_idx = idx[top_local]
|
425 |
-
|
426 |
-
emb_norm = _minmax(emb_sims[top_local])
|
427 |
-
price_norm = _minmax(price_bonus[top_local])
|
428 |
-
int_norm = _minmax(int_bonus[top_local])
|
429 |
-
occ_norm = _minmax(oc_bonus := occ_bonus[top_local])
|
430 |
-
|
431 |
-
try:
|
432 |
-
from sentence_transformers import CrossEncoder as _CE
|
433 |
-
ce = _load_cross_encoder()
|
434 |
-
if ce is not None:
|
435 |
-
docs = CATALOG.loc[cand_idx, "doc"].tolist()
|
436 |
-
pairs = [(q, d) for d in docs]
|
437 |
-
ce_raw = np.array(ce.predict(pairs), dtype="float32")
|
438 |
-
ce_norm = _minmax(ce_raw)
|
439 |
-
else:
|
440 |
-
ce_norm = np.zeros_like(emb_norm)
|
441 |
-
except Exception:
|
442 |
-
ce_norm = np.zeros_like(emb_norm)
|
443 |
-
|
444 |
-
final = (
|
445 |
-
0.56 * emb_norm +
|
446 |
-
0.26 * ce_norm +
|
447 |
-
0.10 * int_norm +
|
448 |
-
0.05 * occ_norm +
|
449 |
-
0.03 * price_norm
|
450 |
-
).astype("float32")
|
451 |
-
|
452 |
-
pick_idx = _mmr_select(cand_idx, final, k=min(3, cand_idx.size), lambda_=0.7)
|
453 |
-
|
454 |
-
res = CATALOG.loc[pick_idx].copy()
|
455 |
-
pos = {int(cand_idx[i]): i for i in range(len(cand_idx))}
|
456 |
-
res["similarity"] = [float(final[pos[int(i)]]) for i in pick_idx]
|
457 |
return res[["name","short_desc","price_usd","image_url","similarity"]].reset_index(drop=True)
|
458 |
|
-# --------------------- DIY ---------------------
-DIY_MAX_NEW_TOKENS = int(os.getenv("DIY_MAX_NEW_TOKENS", "120"))
-
-INTEREST_ALIASES = {
-    "Reading": ["book", "novel", "literary"],
-    "Fashion": ["style", "chic", "silk"],
-    "Home decor": ["candle", "wall", "jar"],
-    "Technology": ["tech", "gadget", "usb"],
-    "Movies": ["film", "cinema", "poster"],
-}
-FALLBACK_NOUNS = ["Kit", "Set", "Bundle", "Box", "Pack"]
-
-_diy_cache = {}
-def _load_flan(mid: str):
-    if mid in _diy_cache: return _diy_cache[mid]
-    tok = AutoTokenizer.from_pretrained(mid, use_fast=True, trust_remote_code=True)
-    mdl = AutoModelForSeq2SeqLM.from_pretrained(mid, trust_remote_code=True, use_safetensors=True).to(DIY_DEVICE).eval()
-    _diy_cache[mid] = (tok, mdl)
-    print(f"[DIY] Loaded FLAN: {mid}")
-    return _diy_cache[mid]

@torch.inference_mode()
-def _gen(tok, mdl, prompt, max_new_tokens=64, do_sample=False, temperature=0.9, top_p=0.95, seed=None):
-    if seed is None: seed = random.randint(1, 10_000_000)
    random.seed(seed); torch.manual_seed(seed)
-    enc = tok(prompt, truncation=True, max_length=MAX_INPUT_TOKENS, return_tensors="pt")
-    enc = {k: v.to(DIY_DEVICE) for k, v in enc.items()}
-    kw = dict(max_new_tokens=max_new_tokens, eos_token_id=tok.eos_token_id, pad_token_id=tok.eos_token_id)
-    if do_sample:
-        kw.update(dict(do_sample=True, temperature=temperature, top_p=top_p))
-    else:
-        kw.update(dict(do_sample=False, num_beams=1))
-    out = mdl.generate(**enc, **kw)
    return tok.decode(out[0], skip_special_tokens=True).strip()

-def _choose_interest_token(interests: List[str]) -> str:
    for it in interests:
-        cand = INTEREST_ALIASES.get(it, [])
-        if cand: return random.choice(cand)
    return (interests[0].split()[0].lower() if interests else "gift")

-def _sanitize_name(name: str, interests: List[str]) -> str:
-        name = re.sub(b, "", name, flags=re.I).strip()
-    name = re.sub(r'[:\-–—]+$', "", name).strip()
-    alias = _choose_interest_token(interests)
-    base = name.lower()
-    if alias not in base:
-        tokens = [t for t in re.split(r"[\s\-]+", name) if t]
-        if len(tokens) < 4:
-            name = f"{alias.capitalize()} " + " ".join([t.capitalize() for t in tokens]) if tokens else f"{alias.capitalize()} {random.choice(FALLBACK_NOUNS)}"
-        else:
-            tokens.insert(1, alias.capitalize())
-            name = " ".join(tokens)
-    name = re.sub(r'\b(Home Decor:?\s*){2,}', 'Home Decor ', name, flags=re.I)
-    name = _title_case(name)[:80]
-    if len(name.split()) < 3:
-        noun = random.choice(FALLBACK_NOUNS)
-        name = f"{alias.capitalize()} {noun}"
    return name

-def _split_list_text(s, seps):
-    s = s.strip()
    for sep in seps:
        if sep in s:
-            parts = [p.strip(" -•*.,;:") for p in s.split(sep)]
-            parts = [p for p in parts if p]
-            if len(parts) >= 2:
-                return parts
-    parts = [p.strip(" -•*.,;:") for p in re.split(r"[\n\r;]+", s)]
-    return [p for p in parts if p]

-def _coerce_materials(items):
-    out = []
    for it in items:
        if not it: continue
-    while len(out) < 6 and i < len(base):
-        if base[i].lower() not in [x.lower() for x in out]:
-            out.append(base[i])
-        i += 1
    return out[:8]

-def _coerce_steps(items):
-    out = []
    for it in items:
        if not it: continue
-        if len(out) >= 8: break
-    while len(out) < 6:
-        out.append(f"Refine and decorate step {len(out)+1}")
    return out[:8]

-def _only_int(s: str):

-def diy_generate(profile: Dict) -> Tuple[dict, str]:
-    p = {
-        "recipient_name": profile.get("recipient_name","Recipient"),
-        "relationship": profile.get("relationship","Friend"),
-        "occ_ui": profile.get("occ_ui","Birthday"),
-        "occasion": profile.get("occ_ui","Birthday"),
-        "interests": profile.get("interests",[]),
-        "budget_min": int(float(profile.get("budget_min",10))),
-        "budget_max": int(float(profile.get("budget_max",100))),
-        "age_range": profile.get("age_range","any"),
-        "gender": profile.get("gender","any")
-    }
-    lang = "English"
-    ints_str = ", ".join(p["interests"]) or "general"
-
-    prompt_name = (
-        f"Return ONLY a DIY gift NAME in Title Case (4–8 words). "
-        f"Must include at least one interest token from: {', '.join(sum(([it]+INTEREST_ALIASES.get(it,[]) for it in p['interests']), [])) or 'gift'}. "
-        f"Occasion: {p['occ_ui']}. Relationship: {p['relationship']}. Language: {lang}. "
-        "Forbidden: the words 'name', 'title', 'family'. No quotes, no trailing punctuation.\n"
-        "Examples:\n"
-        "Reading → Literary Candle Bookmark Kit\n"
-        "Technology → Gadget Cable Organizer Set\n"
-        "Home decor → Rustic Jar Candle Bundle\n"
-        "Output:"
-    )
-    raw_name = _gen(tok, mdl, prompt_name, max_new_tokens=24, do_sample=False)
-    name = _sanitize_name(raw_name, p["interests"])
-
-    prompt_over = (
-        f"Write EXACTLY 2 sentences in {lang} for a handmade gift called '{name}'. "
-        f"Mention {p['recipient_name']} ({p['relationship']}) and the occasion ({p['occ_ui']}). "
-        f"Explain how it reflects the interests: {ints_str}. "
-        "No lists, no emojis. Output only the two sentences."
-    )
-    overview = _gen(tok, mdl, prompt_over, max_new_tokens=80, do_sample=True, temperature=0.9, top_p=0.95)
-
-    prompt_mat = (
-        f"List 6 concise materials with quantities to make '{name}' cheaply. "
-        f"Keep total within {p['budget_min']}-{p['budget_max']} USD. "
-        "Output ONLY a comma-separated list (e.g., 'glass jar x2, soy tealights x4, ...')."
-    )
-    mats_txt = _gen(tok, mdl, prompt_mat, max_new_tokens=96, do_sample=False)
-    materials = _split_list_text(mats_txt, [",", ";"])
-
-    prompt_steps = (
-        f"Write 6 short imperative steps to make '{name}'. "
-        "Output ONLY a semicolon-separated list."
-    )
-    steps_txt = _gen(tok, mdl, prompt_steps, max_new_tokens=120, do_sample=True, temperature=0.9, top_p=0.95)
-    steps = _split_list_text(steps_txt, [";", "\n"])
-
-    prompt_cost = (
-        f"Return ONE integer total cost in USD between {p['budget_min']}-{p['budget_max']}. Output NUMBER only."
-    )
-    cost_txt = _gen(tok, mdl, prompt_cost, max_new_tokens=6, do_sample=False)
-    cost = _only_int(cost_txt)
-
-    time_txt = _gen(tok, mdl, "Return ONE integer minutes between 20 and 180. Output NUMBER only.",
-                    max_new_tokens=6, do_sample=False)
-    minutes = _only_int(time_txt)
-
-    idea = _build_json(p, {
-        "gift_name": name,
-        "overview": overview,
-        "materials_needed": materials,
-        "steps": steps,
-        "estimated_cost_usd": cost,
-        "estimated_time_minutes": minutes,
-    })
-    return idea, "ok"
-# --------------------- END DIY ---------------------

# --------------------- Personalized Message (FLAN) ---------------------
MSG_MODEL_ID = "google/flan-t5-small"
-MSG_DEVICE = "cpu"
TEMP_RANGE = (0.88, 1.10)
TOPP_RANGE = (0.90, 0.96)
REP_PENALTY = 1.12
@@ -763,12 +398,7 @@ TONE_STYLES: Dict[str, Dict[str, List[str]]] = {
    },
}
BAN_PHRASES = [
-    "may your day be filled",
-    "on this special day",
-    "from the bottom of my heart",
-    "best wishes on your",
-    "warm wishes",
]
OPENERS = [
    "Here’s to a moment that fits you perfectly:",
@@ -831,14 +461,12 @@ def _build_prompt(profile: Dict[str, Any]) -> Tuple[str, Dict[str,str]]:
    ])

    lines = [
        f"Recipient: {name} ({rel}). Occasion: {occ}. Interests: {ints}. Tone: {tone}.",
        style["system"],
        "Rules:",
        *[f"- {r}" for r in style["rules"]],
-        f"- Avoid clichés such as: {', '.join(BAN_PHRASES)}.",
        "- No emojis. No bullet points.",
-        "- Do not start with 'Dear' unless Tone is Formal.",
        f"- Start with: \"{opener}\" (continue naturally, not as a header).",
        f"- End with a natural line similar to: \"{closer}\" (rephrase; do not quote).",
        f"- {spice}",
@@ -893,291 +521,112 @@ def generate_personal_message(profile: Dict[str, Any], seed: Optional[int]=None,
        return {"message": fallback, "meta": {"failed": True, "model": MSG_MODEL_ID, "tone": profile.get("tone","Heartfelt")}}
# --------------------- END Personalized Message ---------------------

-# --------------------- Rendering ---------------------
-def md_escape(text: str) -> str:
-    return str(text).replace("|","\\|").replace("*","\\*").replace("_","\\_")

    if not s: return ""

-def render_top3_html(df: pd.DataFrame, age_label: str) -> str:
-        img = r.get("image_url","") or ""
-        price_str = f"${price:.0f}" if pd.notna(price) else "N/A"
-        sim_str = f"{sim:.3f}" if pd.notna(sim) else "—"
-        img_html = f'<img src="{img}" alt="" style="width:84px;height:84px;object-fit:cover;border-radius:10px;margin-left:12px;" />' if img else ""
-        card = f"""
<div style="display:flex;align-items:flex-start;justify-content:space-between;gap:10px;padding:10px;border:1px solid #eee;border-radius:12px;margin-bottom:8px;background:#fff;">
-  <div style="flex:1;min-width:0;">
-    <div style="font-weight:700;">{name}</div>
    <div style="font-size:0.95em;margin-top:4px;">{desc}</div>
-    <div style="font-size:0.9em;margin-top:6px;opacity:0.8;">
-    </div>
-  {img_html}
-</div>
-"""
-        rows.append(card)
    return "\n".join(rows)

-with gr.Blocks(css="""
-.gr-dataframe tbody tr {
-  display: block;
-  background: linear-gradient(180deg,#fff, #fafafa);
-  border-radius: 14px;
-  border: 1px solid #e9eef5;
-  box-shadow: 0 1px 1px rgba(16,24,40,.04), 0 1px 2px rgba(16,24,40,.06);
-  padding: 10px 12px;
-  transition: transform .06s ease, box-shadow .12s ease, background .12s ease;
-}
-.gr-dataframe tbody tr:hover {
-  transform: translateY(-1px);
-  background: #f8fafc;
-  box-shadow: 0 3px 10px rgba(16,24,40,.08);
-}
-
-/* cells without visible borders */
-.gr-dataframe tbody tr td {
-  border: 0 !important;
-  padding: 4px 8px !important;
-  vertical-align: middle;
-  font-size: .92rem;
-  line-height: 1.3;
-}
-
-/* first column (Recipient) slightly bolder */
-.gr-dataframe tbody tr td:nth-child(1) {
-  font-weight: 700;
-  font-size: 1rem;
-  letter-spacing: .2px;
-}
-
-/* relationship / occasion — softer text */
-.gr-dataframe tbody tr td:nth-child(2),
-.gr-dataframe tbody tr td:nth-child(4) {
-  opacity: .8;
-}
-
-/* chip-style cells (Interests, Tone, Gender, Age) */
-.gr-dataframe tbody tr td:nth-child(3),
-.gr-dataframe tbody tr td:nth-child(9),
-.gr-dataframe tbody tr td:nth-child(6),
-.gr-dataframe tbody tr td:nth-child(5) {
-  display: inline-block;
-  background: #eff4ff;
-  color: #243b6b;
-  border: 1px solid #dbe5ff;
-  border-radius: 999px;
-  padding: 2px 10px !important;
-  font-size: .84rem;
-  margin: 2px 6px 2px 0;
-}
-
-/* budget — neutral chips (Min/Max) */
-.gr-dataframe tbody tr td:nth-child(7),
-.gr-dataframe tbody tr td:nth-child(8) {
-  display: inline-block;
-  background: #f1f5f9;
-  border: 1px solid #e2e8f0;
-  color: #0f172a;
-  border-radius: 10px;
-  padding: 2px 8px !important;
-  font-variant-numeric: tabular-nums;
-  margin: 2px 6px 2px 0;
-}
-
-/* chip spacing within a row */
-.gr-dataframe tbody tr td {
-  margin-right: 2px;
-}
-.gr-dataframe tbody tr td:last-child {
-  margin-right: 0;
-}
-
-/* hide Handsontable selection borders/frames */
-.handsontable .wtBorder, .handsontable .htBorders, .handsontable .wtBorder.current { display: none !important; }
-.gr-dataframe table td:focus { outline: none !important; box-shadow: none !important; }
-"""
-) as demo:
    gr.Markdown(TITLE)
-
-    # ===== EXAMPLES TABLE (Top, clickable) =====
    gr.Markdown("### Quick examples (click a row to auto-fill)", elem_id="explain")

-    EXAMPLES = [
-        # interests, occasion, bmin, bmax, name, relationship, age_label, gender, tone
-        (["Technology","Movies"], "Birthday", 25, 45, "Daniel", "Friend", "adult (18–64)", "male", "Funny"),
-        (["Art","Reading","Home decor"], "Anniversary", 30, 60, "Rotem", "Romantic partner", "adult (18–64)", "female", "Romantic"),
-        (["Gaming","Photography"], "Birthday", 30, 120, "Omer", "Family - Sibling", "teen (13–17)", "male", "Playful"),
-        (["Reading","Art"], "Graduation", 15, 35, "Maya", "Friend", "adult (18–64)", "female", "Heartfelt"),
-        (["Science","Crafts"], "Holidays", 15, 30, "Adam", "Family - Child", "kid (3–12)", "any", "Encouraging"),
-    ]
-    EX_COLS = ["Recipient","Relationship","Interests","Occasion","Age group","Gender","Min $","Max $","Tone"]
-    EX_DF = pd.DataFrame([
-        [name, rel, " + ".join(interests), occ, age, gender, bmin, bmax, tone]
-        for (interests, occ, bmin, bmax, name, rel, age, gender, tone) in EXAMPLES
-    ], columns=EX_COLS)
-
-    ex_df = gr.Dataframe(value=EX_DF, interactive=False, wrap=True)
-    gr.Markdown("---")
-
-    # === Inputs (always open) ===
    with gr.Row():
    with gr.Row():
-            value=["Technology","Movies"], interactive=True)
    with gr.Row():

-    out_diy_md = gr.Markdown()
-    gr.Markdown("### 📝 Personalized Message")
-    out_msg = gr.Markdown()

-    # --- State: run token (to "override" previous runs safely) ---
-    run_token = gr.State(0)
-
-    # ---- row click handler (fill form) ----
    def _on_example_select(evt: gr.SelectData):
-        row = EX_DF.iloc[r]
-        ints = [s.strip() for s in str(row["Interests"]).split("+")]
-        return (
-            ints, row["Occasion"], int(row["Min $"]), int(row["Max $"]),
-            row["Recipient"], row["Relationship"], row["Age group"], row["Gender"], row["Tone"]
-        )
-
-    ex_df.select(
-        _on_example_select,
-        outputs=[interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone]
-    )

-    def render_diy_md(j: dict) -> str:
        if not j: return "_DIY generation failed._"
-        steps = j.get("steps", [])
        parts = [
-            f"**{j.get('gift_name','(no name)')}**",
-            "",
-            "",
-            "**Materials**",
-            "\n".join(f"- {m}" for m in j.get("materials_needed", [])),
-            "",
-            "**Steps**",
-            "\n".join(f"{i+1}. {s}" for i, s in enumerate(steps)),
-            "",
            f"**Estimated cost:** ${j.get('estimated_cost_usd','?')} · **Time:** {j.get('estimated_time_minutes','?')} min"
        ]
        return "\n".join(parts)

-    def _build_profile(interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val):
-            bmin, bmax = 5.0, 500.0
-        if bmin > bmax: bmin, bmax = bmax, bmin
-        return {
-            "recipient_name": name or "Friend",
-            "relationship": rel or "Friend",
-            "interests": interests_list or [],
-            "occ_ui": occasion_val or "Birthday",
-            "budget_min": bmin,
-            "budget_max": bmax,
-            "age_range": AGE_OPTIONS.get(age_label, "any"),
-            "gender": (gender_val or "any").lower(),
-            "tone": tone_val or "Heartfelt",
-        }
-
-    # ---- NEW: split into 3 functions (partial results) + token check ----
-    def start_run(curr_token):
-        return int(curr_token or 0) + 1
-
-    def predict_recs_only(rt, interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val):
-        latest = rt
-        profile = _build_profile(interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val)
-        top3 = recommend_top3_budget_first(profile)
-        top3_html = render_top3_html(top3, age_label)
-        return gr.update(value=top3_html, visible=True), latest
-
-    def predict_diy_only(rt, interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val):
-        latest = rt
-        profile = _build_profile(interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val)
-        diy_json, _status = diy_generate(profile)
-        diy_md = render_diy_md(diy_json)
-        return gr.update(value=diy_md, visible=True), latest

-    def predict_msg_only(rt, interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val):
-        latest = rt
-        profile = _build_profile(interests_list, occasion_val, bmin, bmax, name, rel, age_label, gender_val, tone_val)
-        msg_obj = generate_personal_message(profile)
-        msg = msg_obj["message"]
-        return gr.update(value=msg, visible=True), latest

-        queue=True,
-    )
-    ev_diy = ev_start.then(
-        predict_diy_only,
-        inputs=[run_token, interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone],
-        outputs=[out_diy_md, run_token],
-        queue=True,
-    )
-    ev_msg = ev_start.then(
-        predict_msg_only,
-        inputs=[run_token, interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone],
-        outputs=[out_msg, run_token],
-        queue=True,
-    )

-if __name__ == "__main__":
    demo.launch()
+# app.py — GIftyPlus (lean)
import os, re, json, hashlib, pathlib, random
from typing import Dict, List, Tuple, Optional, Any
+import numpy as np, pandas as pd, gradio as gr, torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

TITLE = "# 🎁 GIftyPlus - Smart Gift Recommender\n*Top-3 catalog picks + 1 DIY gift + personalized message*"
+DATASET_ID = os.getenv("DATASET_ID", "Danielos100/Amazon_products_clean")
DATASET_SPLIT = os.getenv("DATASET_SPLIT", "train")
MAX_ROWS = int(os.getenv("MAX_ROWS", "12000"))
EMBED_MODEL_ID = os.getenv("EMBED_MODEL_ID", "sentence-transformers/all-MiniLM-L12-v2")

+def resolve_cache_dir():
+    for p in [os.getenv("EMBED_CACHE_DIR"), os.path.join(os.getcwd(), ".gifty_cache"), "/tmp/.gifty_cache"]:
+        if not p: continue
+        pathlib.Path(p).mkdir(parents=True, exist_ok=True)
+        with open(os.path.join(p, ".write_test"), "w") as f: f.write("ok")
+        pathlib.Path(os.path.join(p, ".write_test")).unlink(missing_ok=True)
+        return p
    return os.getcwd()
EMBED_CACHE_DIR = resolve_cache_dir()

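The resolver above picks the first candidate it can create and write a probe file into, preferring an EMBED_CACHE_DIR override, then a local `.gifty_cache`, then `/tmp`. A minimal standalone sketch of the same pattern follows; the candidate paths here are illustrative, and unlike the lean version it wraps each attempt in try/except (as the older version did) so an unwritable candidate falls through instead of raising.

```python
# Standalone sketch of "first writable cache directory wins" (hypothetical paths).
import os, pathlib

def first_writable(candidates):
    for p in candidates:
        if not p:
            continue
        try:
            pathlib.Path(p).mkdir(parents=True, exist_ok=True)
            probe = pathlib.Path(p) / ".write_test"
            probe.write_text("ok")          # probe write, then clean up
            probe.unlink()
            return p
        except OSError:
            continue                        # not writable -> try the next candidate
    return os.getcwd()

print(first_writable([os.getenv("EMBED_CACHE_DIR"), "/tmp/.demo_cache"]))
```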
+INTEREST_OPTIONS = ["Sports","Travel","Cooking","Technology","Music","Art","Reading","Gardening","Fashion","Gaming","Photography","Hiking","Movies","Crafts","Pets","Wellness","Collecting","Food","Home decor","Science"]
+OCCASION_UI = ["Birthday","Wedding / Engagement","Anniversary","Graduation","New baby","Housewarming","Retirement","Holidays","Valentine’s Day","Promotion / New job","Get well soon"]
+OCCASION_CANON = {"Birthday":"birthday","Wedding / Engagement":"wedding","Anniversary":"anniversary","Graduation":"graduation","New baby":"new_baby","Housewarming":"housewarming","Retirement":"retirement","Holidays":"holidays","Valentine’s Day":"valentines","Promotion / New job":"promotion","Get well soon":"get_well"}
+RECIPIENT_RELATIONSHIPS = ["Family - Parent","Family - Sibling","Family - Child","Family - Other relative","Friend","Colleague","Boss","Romantic partner","Teacher / Mentor","Neighbor","Client / Business partner"]
+MESSAGE_TONES = ["Formal","Casual","Funny","Heartfelt","Inspirational","Playful","Romantic","Appreciative","Encouraging"]
AGE_OPTIONS = {"any":"any","kid (3–12)":"kids","teen (13–17)":"teens","adult (18–64)":"adult","senior (65+)":"senior"}
GENDER_OPTIONS = ["any","female","male","nonbinary"]

+SYNONYMS = {"sports":["fitness","outdoor","training","yoga","run"],"travel":["luggage","passport","map","trip","vacation"],"cooking":["kitchen","cookware","chef","baking"],"technology":["electronics","gadgets","device","smart","computer"],"music":["audio","headphones","earbuds","speaker","vinyl"],"art":["painting","drawing","sketch","canvas"],"reading":["book","novel","literature"],"gardening":["plants","planter","seeds","garden","indoor"],"fashion":["style","accessory","jewelry"],"gaming":["board game","puzzle","video game","controller"],"photography":["camera","lens","tripod","film"],"hiking":["outdoor","camping","backpack","trek"],"movies":["film","cinema","blu-ray","poster"],"crafts":["diy","handmade","kit","knitting"],"pets":["dog","cat","pet"],"wellness":["relaxation","spa","aromatherapy","self-care"],"collecting":["display","collector","limited edition"],"food":["gourmet","snack","treats","chocolate"],"home decor":["home","decor","wall art","candle"],"science":["lab","experiment","STEM","microscope"]}
+REL_TO_TOKENS = {"Family - Parent":["parent","family"],"Family - Sibling":["sibling","family"],"Family - Child":["kids","play","family"],"Family - Other relative":["family","relative"],"Friend":["friendly"],"Colleague":["office","work","professional"],"Boss":["executive","professional","premium"],"Romantic partner":["romantic","couple"],"Teacher / Mentor":["teacher","mentor","thank_you"],"Neighbor":["neighbor","housewarming"],"Client / Business partner":["professional","thank_you","premium"]}

+_CURRENCY_RE = re.compile(r"[^\d.,\-]+"); _NUM_RE = re.compile(r"(\d+(?:[.,]\d+)?)"); _RANGE_SEP = re.compile(r"\s*(?:-|–|—|to)\s*")
+def _to_price_usd(x):
    if pd.isna(x): return np.nan
    s = str(x).strip().lower()
+    if _RANGE_SEP.search(s): s = _RANGE_SEP.split(s)[0]
+    s = _CURRENCY_RE.sub(" ", s); m = _NUM_RE.search(s.replace(",", "."))
+    return float(m.group(1)) if m else np.nan

+def _first_present(df, cands):
+    lower = {c.lower(): c for c in df.columns}
+    for c in cands:
        if c in df.columns: return c
+        if c.lower() in lower: return lower[c.lower()]
    return None

+def _auto_price_col(df):
    for c in df.columns:
        s = df[c]
+        if pd.api.types.is_numeric_dtype(s) and not s.dropna().empty and (s.dropna().between(0.5, 10000)).mean() > .6: return c
    for c in df.columns:
+        if df[c].astype(str).head(200).str.lower().str.contains(r"\$|₪|eur|usd|£|€|\d").mean() > .5: return c
    return None
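The price normalizer keeps the lower bound of a price range, strips currency symbols and words, then pulls the first numeric token. A standalone sketch with the same regexes (re-declared here) and a few illustrative inputs:

```python
# Sketch of the price normalization above; sample strings are illustrative only.
import re
import numpy as np
import pandas as pd

_CURRENCY_RE = re.compile(r"[^\d.,\-]+")
_NUM_RE = re.compile(r"(\d+(?:[.,]\d+)?)")
_RANGE_SEP = re.compile(r"\s*(?:-|–|—|to)\s*")

def to_price_usd(x):
    if pd.isna(x):
        return np.nan
    s = str(x).strip().lower()
    if _RANGE_SEP.search(s):              # "12 to 18" -> keep the lower bound
        s = _RANGE_SEP.split(s)[0]
    s = _CURRENCY_RE.sub(" ", s)          # drop currency symbols/words
    m = _NUM_RE.search(s.replace(",", "."))
    return float(m.group(1)) if m else np.nan

for raw in ["$19.99", "USD 7", "12 to 18 dollars", None]:
    print(raw, "->", to_price_usd(raw))   # 19.99, 7.0, 12.0, nan
```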

+def map_amazon_to_schema(raw: pd.DataFrame) -> pd.DataFrame:
+    name_c=_first_present(raw,["product name","title","name","product_title"]); desc_c=_first_present(raw,["description","product_description","feature","about"])
+    cat_c=_first_present(raw,["category","categories","main_cat","product_category"]); price_c=_first_present(raw,["selling price","price","current_price","list_price","price_amount","actual_price","price_usd"]) or _auto_price_col(raw)
+    img_c=_first_present(raw,["image","image_url","imageurl","imUrl","img","img_url"])
+    df=pd.DataFrame({"name":raw.get(name_c,""),"short_desc":raw.get(desc_c,""),"tags":raw.get(cat_c,""),"price_usd":raw.get(price_c,np.nan),"image_url":raw.get(img_c,"")})
+    df["price_usd"]=df["price_usd"].map(_to_price_usd); df["name"]=df["name"].astype(str).str.strip().str.slice(0,160)
+    df["short_desc"]=df["short_desc"].astype(str).str.strip().str.slice(0,600); df["tags"]=df["tags"].astype(str).str.replace("|",", ").str.lower()
+    return df

+def extract_top_cat(tags:str)->str:
+    s=(tags or "").lower()
+    for sep in ["|",">"]:
+        if sep in s: return s.split(sep,1)[0].strip()
    return s.strip().split(",")[0] if s else ""

+def load_catalog()->pd.DataFrame:
+    df=map_amazon_to_schema(load_dataset(DATASET_ID, split=DATASET_SPLIT).to_pandas()).drop_duplicates(subset=["name","short_desc"])
+    df=df[pd.notna(df["price_usd"])]; df=df[(df["price_usd"]>0)&(df["price_usd"]<=500)].reset_index(drop=True)
+    if len(df)>MAX_ROWS: df=df.sample(n=MAX_ROWS,random_state=42).reset_index(drop=True)
+    df["doc"]=(df["name"].fillna("")+" | "+df["tags"].fillna("")+" | "+df["short_desc"].fillna("")).str.strip()
+    df["top_cat"]=df["tags"].map(extract_top_cat)
+    df["blob"]=(df["name"].fillna("")+" "+df["tags"].fillna("")+" "+df["short_desc"].fillna("")).str.lower()
    return df
+CATALOG=load_catalog()

class EmbeddingBank:
+    def __init__(s, docs, model_id, dataset_tag):
+        s.model_id=model_id; s.dataset_tag=dataset_tag; s.model=SentenceTransformer(model_id); s.embs=s._load_or_build(docs)
+    def _cache_path(s,n): return os.path.join(EMBED_CACHE_DIR, f"emb_{hashlib.md5((s.dataset_tag+'|'+s.model_id+f'|{n}').encode()).hexdigest()[:10]}.npy")
+    def _load_or_build(s,docs):
+        p=s._cache_path(len(docs))
+        if os.path.exists(p):
+            embs=np.load(p,mmap_mode="r")
+            if embs.shape[0]==len(docs): return embs
+        embs=s.model.encode(docs, convert_to_numpy=True, normalize_embeddings=True, show_progress_bar=True)
+        np.save(p, embs); return np.load(p, mmap_mode="r")
+    def query_vec(s,text): return s.model.encode([text], convert_to_numpy=True, normalize_embeddings=True)[0]
+EMB=EmbeddingBank(CATALOG["doc"].tolist(), EMBED_MODEL_ID, DATASET_ID)
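Because the embeddings are stored L2-normalized, cosine similarity against a query vector is just a matrix-vector dot product. A self-contained sketch of that retrieval step, with random vectors standing in for the real MiniLM embeddings so it runs without downloading a model:

```python
# Minimal retrieval sketch: normalized embeddings make cosine similarity a dot product.
import numpy as np

rng = np.random.default_rng(0)
docs = rng.normal(size=(5, 8)).astype("float32")
docs /= np.linalg.norm(docs, axis=1, keepdims=True)   # like normalize_embeddings=True
query = rng.normal(size=8).astype("float32")
query /= np.linalg.norm(query)

sims = docs @ query                                   # cosine similarities, shape (5,)
print(np.argsort(-sims)[:3])                          # indices of the top-3 documents
```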

_tok_rx = re.compile(r"[a-z0-9][a-z0-9\-']*")
if "tok_set" not in CATALOG.columns:
+    CATALOG["tok_set"]=(CATALOG["name"].fillna("")+" "+CATALOG["tags"].fillna("")+" "+CATALOG["short_desc"].fillna("")).map(lambda t:set(_tok_rx.findall(str(t).lower())))

try:
    from sentence_transformers import CrossEncoder
+except Exception:
+    CrossEncoder=None
+RERANK_MODEL_ID=os.getenv("RERANK_MODEL_ID","cross-encoder/ms-marco-MiniLM-L-6-v2")
+_CE_MODEL=None
def _load_cross_encoder():
    global _CE_MODEL
+    if _CE_MODEL is None and CrossEncoder is not None:
+        _CE_MODEL=CrossEncoder(RERANK_MODEL_ID, device="cpu")
    return _CE_MODEL
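When sentence-transformers is installed, the cross-encoder scores each (query, document) pair jointly rather than comparing two independent vectors. A sketch of that call, with illustrative pairs; it downloads the model on first use, so it is wrapped to degrade gracefully:

```python
# Sketch of the optional reranking stage (pairs are illustrative).
try:
    from sentence_transformers import CrossEncoder
    ce = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2", device="cpu")
    scores = ce.predict([
        ("birthday gift for a friend who likes photography", "Camera lens cleaning kit"),
        ("birthday gift for a friend who likes photography", "Stainless steel soup ladle"),
    ])
    print(scores)   # higher score = more relevant (query, document) pair
except Exception as exc:
    print("reranker unavailable:", exc)
```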

+OCCASION_PRIORS={"valentines":[("jewelry",.12),("chocolate",.10),("candle",.08),("romantic",.08),("couple",.08),("heart",.06)],
+    "birthday":[("fun",.06),("game",.06),("personalized",.06),("gift set",.05),("surprise",.04)],
+    "anniversary":[("couple",.10),("jewelry",.10),("photo",.08),("frame",.06),("memory",.06),("candle",.06)],
+    "graduation":[("journal",.10),("planner",.08),("office",.08),("coffee",.06),("motivation",.06)],
+    "housewarming":[("home",.10),("kitchen",.08),("decor",.10),("candle",.06),("serving",.06)],
+    "new_baby":[("baby",.12),("nursery",.10),("soft",.06),("blanket",.06)],
+    "retirement":[("relax",.08),("hobby",.08),("travel",.06),("book",.06)],
+    "holidays":[("holiday",.10),("winter",.08),("chocolate",.08),("cozy",.06),("family",.06)],
+    "promotion":[("desk",.10),("office",.10),("premium",.08),("organizer",.06)],
+    "get_well":[("cozy",.10),("tea",.08),("soothing",.06),("care",.06)]}

+def expand_with_synonyms(tokens: List[str])->List[str]:
+    out=[]
    for t in tokens:
+        t=t.strip().lower()
+        if t: out+=[t]+SYNONYMS.get(t,[])
    return out

+def profile_to_query(p:Dict)->str:
+    inter=[i.lower() for i in p.get("interests",[]) if i]; expanded=expand_with_synonyms(inter)*3
+    parts=[", ".join(expanded) if expanded else "", ", ".join(REL_TO_TOKENS.get(p.get("relationship","Friend"),[])), OCCASION_CANON.get(p.get("occ_ui","Birthday"),"birthday")]
+    tail=f"gift ideas for a {p.get('relationship','Friend')} for {parts[-1]}; likes {', '.join(inter) or 'general'}"
+    return " | ".join([x for x in parts if x])+" | "+tail

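The query builder repeats interests (plus synonyms) three times to weight them in the embedding query, then appends relationship/occasion tokens and a natural-language tail. A toy, self-contained version with a two-entry stand-in synonym dictionary shows the resulting string shape:

```python
# Toy version of the query builder; SYNONYMS here is a small stand-in dictionary.
SYNONYMS = {"photography": ["camera", "lens"], "reading": ["book", "novel"]}

def build_query(interests, relationship, occasion):
    expanded = []
    for t in interests:
        expanded += [t] + SYNONYMS.get(t, [])
    expanded *= 3                              # repeat to boost interest terms
    tail = f"gift ideas for a {relationship} for {occasion}; likes {', '.join(interests)}"
    return " | ".join([", ".join(expanded), relationship.lower(), occasion, tail])

print(build_query(["photography"], "Friend", "birthday"))
```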
+def _gender_ok_mask(g:str)->np.ndarray:
+    g=(g or "any").lower(); bl=CATALOG["blob"]
+    has_m=bl.str.contains(r"\b(men|man's|mens|male|for men)\b",regex=True,na=False)
+    has_f=bl.str.contains(r"\b(women|woman's|womens|female|for women|dress)\b",regex=True,na=False)
+    has_u=bl.str.contains(r"\bunisex|gender neutral\b",regex=True,na=False)
+    if g=="female": return (~has_m | has_u).to_numpy()
+    if g=="male": return (~has_f | has_u).to_numpy()
+    return np.ones(len(bl),bool)

+def _mask_by_age(age:str, blob:pd.Series)->np.ndarray:
+    kids=blob.str.contains(r"\b(?:kid|kids|child|children|toddler|baby|boys?|girls?|kid's|children's)\b",regex=True,na=False)
+    teen=blob.str.contains(r"\b(?:teen|teens|young adult|ya)\b",regex=True,na=False)
+    if age in ("adult","senior"): return (~kids).to_numpy()
+    if age=="teens": return ((~kids)|teen).to_numpy()
+    if age=="kids": return (kids | (~teen & kids)).to_numpy()
+    return np.ones(len(blob),bool)

+def _interest_bonus(p:Dict, idx:np.ndarray)->np.ndarray:
+    ints=[i.lower() for i in p.get("interests",[]) if i]; syns=[s for it in ints for s in SYNONYMS.get(it,[])]; vocab=set(ints+syns)
+    if not vocab or idx.size==0: return np.zeros(len(idx),"float32")
+    counts=np.array([len(CATALOG["tok_set"].iat[i] & vocab) for i in idx],"float32"); return .10*np.clip(counts,0,6)

+def _occasion_bonus(idx:np.ndarray, occ_ui:str)->np.ndarray:
+    pri=OCCASION_PRIORS.get(OCCASION_CANON.get(occ_ui or "Birthday","birthday"),[])
+    if not pri or idx.size==0: return np.zeros(len(idx),"float32")
+    bl=CATALOG["blob"].to_numpy(); out=np.zeros(len(idx),"float32")
+    for j,i in enumerate(idx):
+        bonus=sum(w for kw,w in pri if kw in bl[i]); out[j]=min(bonus,.15)
    return out
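The interest bonus is simply the size of the overlap between an item's token set and the interest vocabulary, capped at 6 tokens and worth 0.10 each. A self-contained illustration with plain Python sets:

```python
# Illustration of the interest-bonus rule used above (toy token sets).
import numpy as np

vocab = {"photography", "camera", "lens", "tripod"}
item_tokens = [
    {"camera", "strap", "leather"},
    {"soup", "ladle"},
    {"camera", "lens", "cleaning", "kit"},
]
counts = np.array([len(toks & vocab) for toks in item_tokens], dtype="float32")
print(0.10 * np.clip(counts, 0, 6))   # bonuses of 0.1, 0.0 and 0.2 for the three items
```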

+def _minmax(x:np.ndarray)->np.ndarray:
+    if x.size==0: return x
+    lo,hi=float(np.min(x)),float(np.max(x))
+    return np.zeros_like(x) if hi<=lo+1e-9 else (x-lo)/(hi-lo)

+def _mmr_select(cand_idx:np.ndarray, scores:np.ndarray, k:int, lambda_:float=.7)->np.ndarray:
+    if cand_idx.size<=k: return cand_idx[np.argsort(-scores)][:k]
+    picked=[]; rest=list(range(len(cand_idx))); rel=_minmax(scores)
+    V=np.asarray(EMB.embs,"float32")[cand_idx]; V/=np.linalg.norm(V,axis=1,keepdims=True)+1e-8
+    while len(picked)<k and rest:
+        if not picked: picked.append(rest.pop(int(np.argmax(rel[rest])))); continue
+        sim_to_sel=np.array([float((V[c]@V[picked].T) if np.ndim(V[c]@V[picked].T)==0 else np.max(V[c]@V[picked].T)) for c in rest],"float32")
+        j=int(np.argmax(lambda_*rel[rest]-(1-lambda_)*sim_to_sel)); picked.append(rest.pop(j))
+    return cand_idx[np.array(picked,int)]
|
183 |
+
def recommend_top3_budget_first(p:Dict)->pd.DataFrame:
|
184 |
+
lo,hi=float(p.get("budget_min",0)),float(p.get("budget_max",1e9))
|
185 |
+
m=(CATALOG["price_usd"].values>=lo)&(CATALOG["price_usd"].values<=hi); age=_mask_by_age(p.get("age_range","any"),CATALOG["blob"]); g=_gender_ok_mask(p.get("gender","any"))
|
186 |
+
idx=np.where(m&age&g)[0]
|
187 |
+
if idx.size==0:
|
188 |
+
idx=np.where(m&g)[0]
|
189 |
+
if idx.size==0:
|
190 |
+
lo2,hi2=max(0,lo*.8), (hi*1.2 if hi<1e8 else hi); idx=np.where(((CATALOG["price_usd"].values>=lo2)&(CATALOG["price_usd"].values<=hi2))&g)[0]
|
191 |
+
if idx.size==0:
|
192 |
+
cheapest=np.argsort(CATALOG["price_usd"].values)[:3]; res=CATALOG.iloc[cheapest].copy(); res["similarity"]=np.nan
|
193 |
+
return res[["name","short_desc","price_usd","image_url","similarity"]].reset_index(drop=True)
|
194 |
+
q=profile_to_query(p); qv=EMB.query_vec(q).astype("float32")
|
195 |
+
emb_sims=np.asarray(EMB.embs,"float32")[idx]@qv
|
196 |
+
target=(lo+hi)/2.0 if hi>lo else hi; prices=CATALOG.iloc[idx]["price_usd"].to_numpy()
|
197 |
+
price_bonus=np.clip(.12-np.abs(prices-target)/max(target,1.0),0,.12).astype("float32")
|
198 |
+
int_bonus=_interest_bonus(p,idx); occ_bonus=_occasion_bonus(idx,p.get("occ_ui","Birthday"))
|
199 |
+
pre=emb_sims+price_bonus+int_bonus+occ_bonus
|
200 |
+
K1=min(48,idx.size); top_local=np.argpartition(-pre,K1-1)[:K1]; cand_idx=idx[top_local]
|
201 |
+
emb_n=_minmax(emb_sims[top_local]); price_n=_minmax(price_bonus[top_local]); int_n=_minmax(int_bonus[top_local]); occ_n=_minmax(occ_bonus[top_local])
|
202 |
+
ce=_load_cross_encoder();
|
203 |
+
if ce is not None:
|
204 |
+
docs=CATALOG.loc[cand_idx,"doc"].tolist(); pairs=[(q,d) for d in docs]
|
205 |
+
k_ce=min(24,len(pairs)); tl=np.argpartition(-emb_n,k_ce-1)[:k_ce]; ce_raw=np.array(ce.predict([pairs[i] for i in tl]),"float32"); ce_n=np.zeros_like(emb_n); ce_n[tl]=_minmax(ce_raw)
|
206 |
+
else:
|
207 |
+
ce_n=np.zeros_like(emb_n)
|
208 |
+
final=(.56*emb_n+.26*ce_n+.10*int_n+.05*occ_n+.03*price_n).astype("float32")
|
209 |
+
pick=_mmr_select(cand_idx,final,k=min(3,cand_idx.size))
|
210 |
+
res=CATALOG.loc[pick].copy(); pos={int(cand_idx[i]):i for i in range(len(cand_idx))}; res["similarity"]=[float(final[pos[int(i)]]) for i in pick]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
211 |
return res[["name","short_desc","price_usd","image_url","similarity"]].reset_index(drop=True)
|
212 |
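A usage sketch of the ranker: the profile keys below mirror those read inside the function, and the values are taken from one of the app's example rows. Importing `app` loads the dataset, embeddings and models, so treat this as illustrative rather than something to run in a test suite.

```python
# Illustrative call; importing app triggers catalog/model loading.
from app import recommend_top3_budget_first

profile = {
    "recipient_name": "Maya",
    "relationship": "Friend",
    "interests": ["Reading", "Art"],
    "occ_ui": "Graduation",
    "budget_min": 15,
    "budget_max": 35,
    "age_range": "adult",
    "gender": "female",
}
top3 = recommend_top3_budget_first(profile)
print(top3[["name", "price_usd", "similarity"]])
```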

+# ===== DIY (FLAN-only) =====
+DIY_MODEL_ID=os.getenv("DIY_MODEL_ID","google/flan-t5-small"); DIY_DEVICE=torch.device("cpu")
+MAX_INPUT_TOKENS=int(os.getenv("MAX_INPUT_TOKENS","384")); DIY_MAX_NEW_TOKENS=int(os.getenv("DIY_MAX_NEW_TOKENS","120"))
+INTEREST_ALIASES={"Reading":["book","novel","literary"],"Fashion":["style","chic","silk"],"Home decor":["candle","wall","jar"],"Technology":["tech","gadget","usb"],"Movies":["film","cinema","poster"]}
+FALLBACK_NOUNS=["Kit","Set","Bundle","Box","Pack"]

+_diy_cache_model={}
+def _load_flan(mid:str):
+    if mid in _diy_cache_model: return _diy_cache_model[mid]
+    tok=AutoTokenizer.from_pretrained(mid, use_fast=True, trust_remote_code=True)
+    mdl=AutoModelForSeq2SeqLM.from_pretrained(mid, trust_remote_code=True, use_safetensors=True).to(DIY_DEVICE).eval()
+    _diy_cache_model[mid]=(tok,mdl); return _diy_cache_model[mid]

@torch.inference_mode()
+def _gen(tok, mdl, prompt, max_new_tokens=64, do_sample=False, temperature=.9, top_p=.95, seed=None):
+    if seed is None: seed=random.randint(1,10_000_000)
    random.seed(seed); torch.manual_seed(seed)
+    enc=tok(prompt, truncation=True, max_length=MAX_INPUT_TOKENS, return_tensors="pt"); enc={k:v.to(DIY_DEVICE) for k,v in enc.items()}
+    out=mdl.generate(**enc, max_new_tokens=max_new_tokens, eos_token_id=tok.eos_token_id, pad_token_id=tok.eos_token_id, **({"do_sample":True,"temperature":temperature,"top_p":top_p} if do_sample else {"do_sample":False,"num_beams":1}))
    return tok.decode(out[0], skip_special_tokens=True).strip()
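The helper switches between greedy decoding (for names, materials, numbers) and nucleus sampling (for the overview and steps). A standalone sketch of the same generate call against FLAN-T5-small, with a toy prompt; it downloads the model on first run:

```python
# Standalone sketch of the deterministic generation path (toy prompt).
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("google/flan-t5-small")
mdl = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small").eval()

enc = tok("List 3 colors. Output ONLY a comma-separated list.", return_tensors="pt")
with torch.inference_mode():
    out = mdl.generate(**enc, max_new_tokens=24, do_sample=False, num_beams=1)
print(tok.decode(out[0], skip_special_tokens=True))
```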

+def _choose_interest_token(interests):
    for it in interests:
+        if INTEREST_ALIASES.get(it): return random.choice(INTEREST_ALIASES[it])
    return (interests[0].split()[0].lower() if interests else "gift")
+def _title_case(s): s=re.sub(r'\s+',' ',s).strip(); s=re.sub(r'["“”‘’]+','',s); return " ".join([w.capitalize() for w in s.split()])
+def _sanitize_name(name, interests):
+    for b in [r"^the name\b",r"\bmember of the family\b",r"^name\b",r"^title\b"]: name=re.sub(b,"",name,flags=re.I).strip()
+    name=re.sub(r'[:\-–—]+$',"",name).strip(); alias=_choose_interest_token(interests)
+    if alias not in name.lower():
+        tokens=[t for t in re.split(r"[\s\-]+",name) if t]
+        name=(f"{alias.capitalize()} "+(" ".join([t.capitalize() for t in tokens]) if tokens else random.choice(FALLBACK_NOUNS))) if len(tokens)<4 else " ".join([tokens[0],alias.capitalize(),*tokens[1:]])
+    name=re.sub(r'\b(Home Decor:?\s*){2,}','Home Decor ',name,flags=re.I); name=_title_case(name)[:80]
+    if len(name.split())<3: name=f"{alias.capitalize()} {random.choice(FALLBACK_NOUNS)}"
    return name

+def _split_list_text(s,seps):
+    s=s.strip()
    for sep in seps:
        if sep in s:
+            parts=[p.strip(" -•*.,;:") for p in s.split(sep) if p.strip(" -•*.,;:")]
+            if len(parts)>=2: return parts
+    return [p.strip(" -•*.,;:") for p in re.split(r"[\n\r;]+", s) if p.strip(" -•*.,;:")]

+def _coerce_materials(items):
+    out=[]
    for it in items:
+        it=re.sub(r'\s+',' ',it).strip(" -•*.,;:")
        if not it: continue
+        it=re.sub(r'(\b\w+\b)(?:\s+\1){2,}',r'\1',it,flags=re.I)
+        if len(it)>60: it=it[:58]+"…"
+        if not re.search(r"\d",it): it+=" x1"
+        if it.lower() not in [x.lower() for x in out]: out.append(it)
+        if len(out)>=8: break
+    base=["Small gift box x1","Decorative paper x2","Twine 2 m","Cardstock sheets x2","Double-sided tape x1","Stickers x8","Ribbon 1 m","Fine-tip marker x1"]
+    for b in base:
+        if len(out)>=6: break
+        if b.lower() not in [x.lower() for x in out]: out.append(b)
    return out[:8]

+def _coerce_steps(items):
+    out=[]
    for it in items:
+        it=it.strip(" -•*.,;:")
        if not it: continue
+        it=re.sub(r'\s+',' ',it)
+        if len(it)>120: it=it[:118]+"…"
+        it=re.sub(r'^(?:\d+[\).\s-]*)','',it); it=it[0].upper()+it[1:] if it else it; out.append(it)
+        if len(out)>=8: break
+    while len(out)<6: out.append(f"Refine and decorate step {len(out)+1}")
    return out[:8]
|
285 |
+
def _only_int(s): m=re.search(r"-?\d+",s); return int(m.group()) if m else None
|
286 |
+
def _clamp_num(v,lo,hi,default):
|
287 |
+
try: x=float(v); return int(min(max(x,lo),hi))
|
288 |
+
except: return int((lo+hi)/2 if default is None else default)
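# Illustrative behavior: _clamp_num("250", 10, 100, None) -> 100; _clamp_num(None, 20, 180, 60) -> 60.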

def diy_generate(profile:Dict)->Tuple[dict,str]:
    tok,mdl=_load_flan(DIY_MODEL_ID)
    p={"recipient_name":profile.get("recipient_name","Recipient"),"relationship":profile.get("relationship","Friend"),
       "occ_ui":profile.get("occ_ui","Birthday"),"occasion":profile.get("occ_ui","Birthday"),"interests":profile.get("interests",[]),
       "budget_min":int(float(profile.get("budget_min",10))),"budget_max":int(float(profile.get("budget_max",100))),
       "age_range":profile.get("age_range","any"),"gender":profile.get("gender","any")}
    lang="English"; ints_str=", ".join(p["interests"]) or "general"
    prompt_name=(f"Return ONLY a DIY gift NAME in Title Case (4–8 words). Must include at least one interest token from: "
                 f"{', '.join(sum(([it]+INTEREST_ALIASES.get(it,[]) for it in p['interests']), [])) or 'gift'}. "
                 f"Occasion: {p['occ_ui']}. Relationship: {p['relationship']}. Language: {lang}. Forbidden: the words 'name','title','family'. "
                 "No quotes, no trailing punctuation.\nExamples:\nReading → Literary Candle Bookmark Kit\nTechnology → Gadget Cable Organizer Set\nHome decor → Rustic Jar Candle Bundle\nOutput:")
    name=_sanitize_name(_gen(tok,mdl,prompt_name, max_new_tokens=24, do_sample=False), p["interests"])
    overview=_gen(tok,mdl,(f"Write EXACTLY 2 sentences in {lang} for a handmade gift called '{name}'. Mention {p['recipient_name']} "
                           f"({p['relationship']}) and the occasion ({p['occ_ui']}). Explain how it reflects the interests: {ints_str}. "
                           "No lists, no emojis. Output only the two sentences."), max_new_tokens=80, do_sample=True, temperature=.9, top_p=.95)
    materials=_split_list_text(_gen(tok,mdl,(f"List 6 concise materials with quantities to make '{name}' cheaply. Keep total within "
                                             f"{p['budget_min']}-{p['budget_max']} USD. Output ONLY a comma-separated list."), max_new_tokens=96, do_sample=False), [",",";"])
    steps=_split_list_text(_gen(tok,mdl,(f"Write 6 short imperative steps to make '{name}'. Output ONLY a semicolon-separated list."), max_new_tokens=120, do_sample=True, temperature=.9, top_p=.95), [";","\n"])
    cost=_only_int(_gen(tok,mdl,(f"Return ONE integer total cost in USD between {p['budget_min']}-{p['budget_max']}. Output NUMBER only."), max_new_tokens=6, do_sample=False))
    minutes=_only_int(_gen(tok,mdl,"Return ONE integer minutes between 20 and 180. Output NUMBER only.", max_new_tokens=6, do_sample=False))
    idea={"gift_name":name,"overview":overview,"materials_needed":_coerce_materials(materials),"steps":_coerce_steps(steps),
          "estimated_cost_usd":_clamp_num(cost,p["budget_min"],p["budget_max"],None),"estimated_time_minutes":_clamp_num(minutes,20,180,60)}
    return idea,"ok"

# --------------------- Personalized Message (FLAN, templates + variations) ---------------------
# >>>>>>>>>>>>>>>>>>>>>> Updated block: the new message generator replaces the previous one <<<<<<<<<<<<<<<<<<<<<<
MSG_MODEL_ID = "google/flan-t5-small"
MSG_DEVICE = "cpu"
TEMP_RANGE = (0.88, 1.10)
TOPP_RANGE = (0.90, 0.96)
REP_PENALTY = 1.12

# … (unchanged lines not shown in this diff)
    },
}
BAN_PHRASES = [

]
OPENERS = [
    "Here’s to a moment that fits you perfectly:",
# … (unchanged lines not shown in this diff)
    ])

    lines = [
        "Generate a short gift-card message in English (2–3 sentences).",
        f"Recipient: {name} ({rel}). Occasion: {occ}. Interests: {ints}. Tone: {tone}.",
        style["system"],
        "Rules:",
        *[f"- {r}" for r in style["rules"]],
        "- No emojis. No bullet points.",
        f"- Start with: \"{opener}\" (continue naturally, not as a header).",
        f"- End with a natural line similar to: \"{closer}\" (rephrase; do not quote).",
        f"- {spice}",
# … (unchanged lines not shown in this diff)
        return {"message": fallback, "meta": {"failed": True, "model": MSG_MODEL_ID, "tone": profile.get("tone","Heartfelt")}}
# --------------------- END Personalized Message ---------------------

# ===== Rendering & UI =====
def first_sentence(s,max_chars=140):
    s=(s or "").strip()
    if not s: return ""
    cut=s.split(". ")[0]
    return cut if len(cut)<=max_chars else cut[:max_chars-1]+"…"

def render_top3_html(df, age_label):
    if df is None or df.empty: return "<em>No results found within the current filters.</em>"
    rows=[]
    for _,r in df.iterrows():
        name=str(r.get("name","")).replace("|","\\|").replace("*","\\*").replace("_","\\_")
        desc=str(first_sentence(r.get("short_desc",""))).replace("|","\\|").replace("*","\\*").replace("_","\\_")
        price=r.get("price_usd"); sim=r.get("similarity"); img=r.get("image_url","") or ""
        price_str=f"${price:.0f}" if pd.notna(price) else "N/A"; sim_str=f"{sim:.3f}" if pd.notna(sim) else "–"
        img_html=f'<img src="{img}" alt="" style="width:84px;height:84px;object-fit:cover;border-radius:10px;margin-left:12px;" />' if img else ""
        rows.append(f"""
<div style="display:flex;align-items:flex-start;justify-content:space-between;gap:10px;padding:10px;border:1px solid #eee;border-radius:12px;margin-bottom:8px;background:#fff;">
<div style="flex:1;min-width:0;"><div style="font-weight:700;">{name}</div>
<div style="font-size:0.95em;margin-top:4px;">{desc}</div>
<div style="font-size:0.9em;margin-top:6px;opacity:0.8;">Price: <b>{price_str}</b> · Age: <code>{age_label}</code> · Score: <code>{sim_str}</code></div>
</div>{img_html}
</div>""")
    return "\n".join(rows)

with gr.Blocks(title="🎁 GIfty – Recommender + DIY", css="""
#explain{opacity:.85;font-size:.92em;margin-bottom:8px;}
.gr-dataframe thead{display:none;}
.gr-dataframe table{border-collapse:separate!important;border-spacing:0 10px!important;table-layout:fixed;width:100%;}
.gr-dataframe tbody tr{cursor:pointer;display:block;background:linear-gradient(180deg,#fff,#fafafa);border-radius:14px;border:1px solid #e9eef5;box-shadow:0 1px 1px rgba(16,24,40,.04),0 1px 2px rgba(16,24,40,.06);padding:10px 12px;transition:transform .06s ease, box-shadow .12s ease, background .12s ease;}
.gr-dataframe tbody tr:hover{transform:translateY(-1px);background:#f8fafc;box-shadow:0 3px 10px rgba(16,24,40,.08);}
.gr-dataframe tbody tr td{border:0!important;padding:4px 8px!important;vertical-align:middle;font-size:.92rem;line-height:1.3;}
.gr-dataframe tbody tr td:nth-child(1){font-weight:700;font-size:1rem;letter-spacing:.2px;}
.gr-dataframe tbody tr td:nth-child(2),.gr-dataframe tbody tr td:nth-child(4){opacity:.8;}
.gr-dataframe tbody tr td:nth-child(3),.gr-dataframe tbody tr td:nth-child(9),.gr-dataframe tbody tr td:nth-child(6),.gr-dataframe tbody tr td:nth-child(5){display:inline-block;background:#eff4ff;color:#243b6b;border:1px solid #dbe5ff;border-radius:999px;padding:2px 10px!important;font-size:.84rem;margin:2px 6px 2px 0;}
.gr-dataframe tbody tr td:nth-child(7),.gr-dataframe tbody tr td:nth-child(8){display:inline-block;background:#f1f5f9;border:1px solid #e2e8f0;color:#0f172a;border-radius:10px;padding:2px 8px!important;font-variant-numeric:tabular-nums;margin:2px 6px 2px 0;}
.handsontable .wtBorder,.handsontable .htBorders,.handsontable .wtBorder.current{display:none!important;}
.gr-dataframe table td:focus{outline:none!important;box-shadow:none!important;}
""") as demo:
    gr.Markdown(TITLE)
    gr.Markdown("### Quick examples (click a row to auto-fill)", elem_id="explain")
    EXAMPLES=[(["Technology","Movies"],"Birthday",25,45,"Daniel","Friend","adult (18–64)","male","Funny"),
              (["Art","Reading","Home decor"],"Anniversary",30,60,"Rotem","Romantic partner","adult (18–64)","female","Romantic"),
              (["Gaming","Photography"],"Birthday",30,120,"Omer","Family - Sibling","teen (13–17)","male","Playful"),
              (["Reading","Art"],"Graduation",15,35,"Maya","Friend","adult (18–64)","female","Heartfelt"),
              (["Science","Crafts"],"Holidays",15,30,"Adam","Family - Child","kid (3–12)","any","Encouraging")]
    EX_COLS=["Recipient","Relationship","Interests","Occasion","Age group","Gender","Min $","Max $","Tone"]
    EX_DF=pd.DataFrame([[name,rel," + ".join(interests),occ,age,gender,bmin,bmax,tone] for (interests,occ,bmin,bmax,name,rel,age,gender,tone) in EXAMPLES], columns=EX_COLS)
    ex_df=gr.Dataframe(value=EX_DF, interactive=False, wrap=True); gr.Markdown("---")

    with gr.Row():
        recipient_name=gr.Textbox(label="Recipient name", value="Daniel")
        relationship=gr.Dropdown(label="Relationship", choices=RECIPIENT_RELATIONSHIPS, value="Friend")
    with gr.Row():
        occasion=gr.Dropdown(label="Occasion", choices=OCCASION_UI, value="Birthday")
        age=gr.Dropdown(label="Age group", choices=list(AGE_OPTIONS.keys()), value="adult (18–64)")
        gender=gr.Dropdown(label="Recipient gender", choices=GENDER_OPTIONS, value="male")
    interests=gr.CheckboxGroup(label="Interests (select a few)", choices=INTEREST_OPTIONS, value=["Technology","Movies"], interactive=True)
    with gr.Row():
        budget_min=gr.Slider(label="Min budget (USD)", minimum=5, maximum=500, step=1, value=25)
        budget_max=gr.Slider(label="Max budget (USD)", minimum=5, maximum=500, step=1, value=45)
        tone=gr.Dropdown(label="Message tone", choices=MESSAGE_TONES, value="Funny")

    go=gr.Button("Get GIfty!")
    gr.Markdown("### 🎯 Recommendations"); out_top3=gr.HTML()
    gr.Markdown("### 🛠️ DIY Gift"); out_diy_md=gr.Markdown()
    gr.Markdown("### 💌 Personalized Message"); out_msg=gr.Markdown()
    run_token=gr.State(0)

    def _on_example_select(evt: gr.SelectData):
        r=int(evt.index[0] if isinstance(evt.index,(list,tuple)) else evt.index); row=EX_DF.iloc[r]; ints=[s.strip() for s in str(row["Interests"]).split("+")]
        return (ints,row["Occasion"],int(row["Min $"]),int(row["Max $"]),row["Recipient"],row["Relationship"],row["Age group"],row["Gender"],row["Tone"])
    ex_df.select(_on_example_select, outputs=[interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone])

    def render_diy_md(j:dict)->str:
        if not j: return "_DIY generation failed._"
        steps=j.get('step_by_step_instructions', j.get('steps', []))
        parts = [
            f"**{j.get('gift_name','(no name)')}**","",
            j.get('overview','').strip(),"",
            "**Materials**","\n".join(f"- {m}" for m in j.get('materials_needed',[])),"",
            "**Steps**","\n".join(f"{i+1}. {s}" for i,s in enumerate(steps)),"",
            f"**Estimated cost:** ${j.get('estimated_cost_usd','?')} · **Time:** {j.get('estimated_time_minutes','?')} min"
        ]
        return "\n".join(parts)

    def _build_profile(ints, occ, bmin, bmax, name, rel, age_label, gender_val, tone_val):
        try: bmin=float(bmin); bmax=float(bmax)
        except: bmin,bmax=5.0,500.0
        if bmin>bmax: bmin,bmax=bmax,bmin
        return {"recipient_name":name or "Friend","relationship":rel or "Friend","interests":ints or [],"occ_ui":occ or "Birthday","budget_min":bmin,"budget_max":bmax,"age_range":AGE_OPTIONS.get(age_label,"any"),"gender":(gender_val or "any").lower(),"tone":tone_val or "Heartfelt"}

    def start_run(curr): return int(curr or 0)+1

    def predict_recs_only(rt, *args):
        p=_build_profile(*args); top3=recommend_top3_budget_first(p); return gr.update(value=render_top3_html(top3, args[6]), visible=True), rt  # args[6] is the age-group label in the inputs list
    def predict_diy_only(rt, *args):
        p=_build_profile(*args); diy_json,_=diy_generate(p); return gr.update(value=render_diy_md(diy_json), visible=True), rt
    def predict_msg_only(rt, *args):
        p=_build_profile(*args); msg_obj=generate_personal_message(p); return gr.update(value=msg_obj["message"], visible=True), rt

    ev_start=go.click(start_run, inputs=[run_token], outputs=[run_token], queue=True)
    ev_start.then(predict_recs_only, inputs=[run_token, interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone], outputs=[out_top3, run_token], queue=True)
    ev_start.then(predict_diy_only, inputs=[run_token, interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone], outputs=[out_diy_md, run_token], queue=True)
    ev_start.then(predict_msg_only, inputs=[run_token, interests, occasion, budget_min, budget_max, recipient_name, relationship, age, gender, tone], outputs=[out_msg, run_token], queue=True)
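    # The three handlers are chained off the same click event: start_run bumps run_token first, then the
    # recommendations, the DIY idea, and the message are produced by separate callbacks, so each output
    # panel can update as soon as its own step finishes.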

if __name__=="__main__":
    demo.launch()