Update app.py
app.py CHANGED
@@ -13,19 +13,25 @@ except Exception as e:
     keyword_generator = None
     print(f"Error loading model: {e}")
 
-# Function to generate keywords
+# Function to generate keywords with a refined prompt and post-processing
 def suggest_keywords(prompt):
     if not keyword_generator:
         return "Model failed to load. Please check the logs or environment."
 
     try:
-        #
-
+        # Refined prompt to generate meaningful keywords
+        refined_prompt = f"Generate a list of keywords related to KDP coloring books for kids. Based on the prompt: '{prompt}'"
 
-        #
+        # Generate keywords with adjusted max_length and temperature for better coherence
+        results = keyword_generator(refined_prompt, max_length=30, num_return_sequences=3, truncation=True, pad_token_id=keyword_generator.tokenizer.eos_token_id, temperature=0.7)
+
+        # Extract generated text
         suggestions = [res["generated_text"].strip() for res in results]
 
-
+        # Post-process the results to remove irrelevant or overly short suggestions
+        clean_suggestions = [suggestion for suggestion in suggestions if len(suggestion.split()) > 2]
+
+        return "\n".join(clean_suggestions)
     except Exception as e:
         return f"Error generating keywords: {e}"
 
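For context, a minimal sketch of how the updated function could sit in a complete app.py. It assumes the Space loads a transformers text-generation pipeline and serves the function through a Gradio interface, neither of which appears in this hunk; the model name, the interface wiring, and the added do_sample=True flag are assumptions, the latter because the transformers pipeline typically ignores temperature and rejects num_return_sequences > 1 unless sampling is enabled.

# Minimal sketch, assuming a transformers text-generation pipeline and a Gradio UI.
# The model name, interface wiring, and do_sample=True are assumptions, not taken
# from the commit.
import gradio as gr
from transformers import pipeline

try:
    # Placeholder model; the Space's actual model is not visible in this hunk.
    keyword_generator = pipeline("text-generation", model="gpt2")
except Exception as e:
    keyword_generator = None
    print(f"Error loading model: {e}")


def suggest_keywords(prompt):
    if not keyword_generator:
        return "Model failed to load. Please check the logs or environment."

    try:
        # Refined prompt, as in the commit.
        refined_prompt = (
            "Generate a list of keywords related to KDP coloring books for kids. "
            f"Based on the prompt: '{prompt}'"
        )

        # do_sample=True is an assumption: without it the pipeline typically ignores
        # temperature and rejects num_return_sequences > 1 under greedy decoding.
        results = keyword_generator(
            refined_prompt,
            max_length=30,
            num_return_sequences=3,
            do_sample=True,
            temperature=0.7,
            truncation=True,
            pad_token_id=keyword_generator.tokenizer.eos_token_id,
        )

        suggestions = [res["generated_text"].strip() for res in results]

        # Drop suggestions of two words or fewer, as in the commit.
        clean_suggestions = [s for s in suggestions if len(s.split()) > 2]
        return "\n".join(clean_suggestions)
    except Exception as e:
        return f"Error generating keywords: {e}"


# Hypothetical Gradio wiring; the real interface code is outside this hunk.
demo = gr.Interface(fn=suggest_keywords, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()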