Besimplestudio committed on
Commit
6e80856
·
verified ·
1 Parent(s): 1faeddd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -224
app.py CHANGED
@@ -1,225 +1,63 @@
1
- import requests
2
- import pandas as pd
3
- import logging
4
  import gradio as gr
5
- from typing import List, Dict, Any
6
- from datetime import datetime
7
- from collections import Counter
8
- import re
9
-
10
- logging.basicConfig(level=logging.INFO)
11
-
12
- class AmazonSuggestionExpander:
13
- def __init__(self):
14
- self.base_url = "https://completion.amazon.com/api/2017/suggestions"
15
- self.headers = {
16
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
17
- }
18
-
19
- def get_amazon_suggestions(self, keyword: str, progress: gr.Progress = None) -> List[str]:
20
- try:
21
- params = {
22
- 'mid': 'ATVPDKIKX0DER',
23
- 'alias': 'aps',
24
- 'prefix': keyword
25
- }
26
- response = requests.get(self.base_url, headers=self.headers, params=params)
27
- response.raise_for_status()
28
-
29
- data = response.json()
30
- suggestions = [item.get('suggestion', '') for item in data.get('suggestions', [])]
31
- return [s for s in suggestions if s] # Filter out empty suggestions
32
-
33
- except requests.RequestException as e:
34
- logging.error(f"Request error: {str(e)}")
35
- raise Exception(f"Failed to fetch suggestions: {str(e)}")
36
- except Exception as e:
37
- logging.error(f"General error: {str(e)}")
38
- raise Exception(f"An error occurred: {str(e)}")
39
-
40
- class KeywordAnalyzer:
41
- def __init__(self):
42
- self.common_words = set(['the', 'and', 'for', 'with', 'in', 'on', 'at', 'to'])
43
-
44
- def analyze_suggestions(self, suggestions: List[str]) -> Dict[str, Any]:
45
- word_counts = [len(s.split()) for s in suggestions]
46
- lengths = [len(s) for s in suggestions]
47
-
48
- # Word frequency analysis
49
- all_words = ' '.join(suggestions).lower().split()
50
- word_freq = Counter([w for w in all_words if w not in self.common_words])
51
-
52
- analysis = {
53
- 'total_count': len(suggestions),
54
- 'avg_word_count': sum(word_counts) / len(suggestions) if suggestions else 0,
55
- 'length_stats': {
56
- 'avg_length': sum(lengths) / len(lengths) if lengths else 0,
57
- 'max_length': max(lengths) if lengths else 0,
58
- 'min_length': min(lengths) if lengths else 0
59
- },
60
- 'common_words': dict(word_freq.most_common(10)),
61
- 'price_mentions': sum(1 for s in suggestions if '$' in s or 'price' in s.lower()),
62
- 'brand_mentions': sum(1 for s in suggestions if any(char.isupper() for char in s.split()))
63
- }
64
- return analysis
65
-
66
- def format_analysis_output(analysis: Dict[str, Any]) -> str:
67
- """Format analysis results as readable text"""
68
- output = "\nAnalysis Results:\n"
69
- output += f"Total Suggestions: {analysis['total_count']}\n"
70
- output += f"Average Words per Suggestion: {analysis['avg_word_count']:.1f}\n"
71
- output += f"Average Length: {analysis['length_stats']['avg_length']:.1f} characters\n"
72
- output += "\nMost Common Words:\n"
73
- for word, count in analysis['common_words'].items():
74
- output += f"- {word}: {count}\n"
75
- return output
76
-
77
- def create_visualization(analysis: Dict[str, Any]) -> str:
78
- """Create HTML visualization of the analysis"""
79
- html = f"""
80
- <!DOCTYPE html>
81
- <html>
82
- <head>
83
- <style>
84
- .analysis-container {{
85
- font-family: Arial, sans-serif;
86
- max-width: 800px;
87
- margin: 0 auto;
88
- padding: 20px;
89
- }}
90
- .metric-card {{
91
- background: #f5f5f5;
92
- border-radius: 8px;
93
- padding: 15px;
94
- margin: 10px 0;
95
- box-shadow: 0 2px 4px rgba(0,0,0,0.1);
96
- }}
97
- .word-cloud {{
98
- display: flex;
99
- flex-wrap: wrap;
100
- gap: 10px;
101
- margin: 10px 0;
102
- }}
103
- .word-item {{
104
- background: #e0e0e0;
105
- padding: 5px 10px;
106
- border-radius: 15px;
107
- font-size: 14px;
108
- }}
109
- h3 {{
110
- margin: 0 0 10px 0;
111
- color: #333;
112
- }}
113
- p {{
114
- margin: 5px 0;
115
- color: #666;
116
- }}
117
- </style>
118
- </head>
119
- <body>
120
- <div class="analysis-container">
121
- <div class="metric-card">
122
- <h3>Overall Statistics</h3>
123
- <p>Total Suggestions: {analysis['total_count']}</p>
124
- <p>Average Words: {analysis['avg_word_count']:.1f}</p>
125
- <p>Price Mentions: {analysis['price_mentions']}</p>
126
- <p>Brand Mentions: {analysis['brand_mentions']}</p>
127
- </div>
128
-
129
- <div class="metric-card">
130
- <h3>Length Statistics</h3>
131
- <p>Average Length: {analysis['length_stats']['avg_length']:.1f} characters</p>
132
- <p>Maximum Length: {analysis['length_stats']['max_length']} characters</p>
133
- <p>Minimum Length: {analysis['length_stats']['min_length']} characters</p>
134
- </div>
135
-
136
- <div class="metric-card">
137
- <h3>Most Common Words</h3>
138
- <div class="word-cloud">
139
- {' '.join([f'<span class="word-item">{word} ({count})</span>'
140
- for word, count in analysis['common_words'].items()])}
141
- </div>
142
- </div>
143
- </div>
144
- </body>
145
- </html>
146
- """
147
- return html
148
-
149
- def search_and_display(keyword: str, min_length: int = 0, progress: gr.Progress = gr.Progress()) -> tuple:
150
- """Main function to handle the search and display process"""
151
- if not keyword:
152
- return "Please enter a keyword", None, None
153
-
154
- try:
155
- # Initialize expander and get suggestions
156
- expander = AmazonSuggestionExpander()
157
- suggestions = expander.get_amazon_suggestions(keyword, progress)
158
-
159
- # Filter suggestions by minimum length if specified
160
- if min_length > 0:
161
- suggestions = [s for s in suggestions if len(s.split()) >= min_length]
162
-
163
- if not suggestions:
164
- return "No suggestions found", None, None
165
-
166
- # Analyze suggestions
167
- analyzer = KeywordAnalyzer()
168
- analysis = analyzer.analyze_suggestions(suggestions)
169
-
170
- # Create DataFrame and save to CSV
171
- df = pd.DataFrame(suggestions, columns=['Suggestions'])
172
- df['Word Count'] = df['Suggestions'].str.split().str.len()
173
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
174
- csv_filename = f"amazon_suggestions_{timestamp}.csv"
175
- df.to_csv(csv_filename, index=False)
176
-
177
- # Create output text
178
- output_text = f"Found {len(suggestions)} suggestions for '{keyword}':\n\n"
179
- for i, suggestion in enumerate(suggestions, 1):
180
- output_text += f"{i}. {suggestion}\n"
181
-
182
- # Add analysis results
183
- output_text += "\n" + format_analysis_output(analysis)
184
-
185
- try:
186
- # Create visualization
187
- visualization = create_visualization(analysis)
188
- except Exception as viz_error:
189
- logging.error(f"Visualization error: {str(viz_error)}")
190
- visualization = "<p>Error creating visualization</p>"
191
-
192
- return output_text, csv_filename, visualization
193
-
194
- except Exception as e:
195
- logging.error(f"Error in search_and_display: {str(e)}")
196
- return f"Error occurred: {str(e)}", None, "<p>Error occurred during analysis</p>"
197
-
198
- # Create Gradio interface
199
- iface = gr.Interface(
200
- fn=search_and_display,
201
- inputs=[
202
- gr.Textbox(label="Enter keyword", placeholder="Type your keyword here..."),
203
- gr.Slider(minimum=0, maximum=10, step=1, label="Minimum word count filter", value=0)
204
- ],
205
- outputs=[
206
- gr.Textbox(label="Results", lines=20),
207
- gr.File(label="Download CSV"),
208
- gr.HTML(label="Analysis Visualization")
209
- ],
210
- title="Advanced Amazon Suggestion Expander",
211
- description="""
212
- Get expanded keyword suggestions from Amazon's search autocomplete with detailed analysis.
213
- Enter a keyword to see all related suggestions, analytics, and visualizations.
214
- """,
215
- examples=[
216
- ["coffee maker", 2],
217
- ["gaming laptop", 3],
218
- ["yoga mat", 1]
219
- ],
220
- cache_examples=True,
221
- theme=gr.themes.Soft()
222
- )
223
-
224
- if __name__ == "__main__":
225
- iface.launch()
 
 
 
 
import gradio as gr
from transformers import pipeline

# Hugging Face models, loaded once at import time.
# GPT-2 produces free-form keyword suggestions; DistilBERT (SST-2 fine-tune)
# classifies review sentiment. Loading here means startup pays the model
# download/initialization cost instead of the first request.
keyword_generator = pipeline("text-generation", model="gpt2", tokenizer="gpt2")
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
12
+ # Function to generate keyword suggestions
13
+ def suggest_keywords(prompt):
14
+ results = keyword_generator(prompt, max_length=50, num_return_sequences=3)
15
+ suggestions = [res["text"].strip() for res in results]
16
+ return "\n".join(suggestions)
17
+
18
+
19
+ # Function to analyze sentiment of user-input text
20
+ def analyze_sentiment(text):
21
+ sentiments = sentiment_analyzer(text)
22
+ return sentiments
23
+
24
+
25
+ # Gradio Interface Design
26
+ with gr.Blocks() as app:
27
+ gr.Markdown(
28
+ """
29
+ # KDP Keyword Suggestion App
30
+ Generate profitable KDP coloring book niches and analyze customer feedback!
31
+ """
32
+ )
33
+
34
+ # Section for keyword generation
35
+ with gr.Row():
36
+ with gr.Column():
37
+ prompt_input = gr.Textbox(
38
+ label="Enter Keyword Prompt",
39
+ placeholder="E.g., coloring book for kids about",
40
+ )
41
+ keyword_output = gr.Textbox(label="Generated Keywords", lines=5)
42
+
43
+ keyword_button = gr.Button("Generate Keywords")
44
+ keyword_button.click(suggest_keywords, inputs=prompt_input, outputs=keyword_output)
45
+
46
+ # Section for sentiment analysis
47
+ with gr.Row():
48
+ with gr.Column():
49
+ review_input = gr.Textbox(
50
+ label="Enter Text for Sentiment Analysis",
51
+ placeholder="Paste a customer review or feedback here...",
52
+ lines=4,
53
+ )
54
+ sentiment_output = gr.Label(label="Sentiment Analysis Result")
55
+
56
+ sentiment_button = gr.Button("Analyze Sentiment")
57
+ sentiment_button.click(analyze_sentiment, inputs=review_input, outputs=sentiment_output)
58
+
59
+ # Footer
60
+ gr.Markdown("Built with ❤️ using Hugging Face and Gradio for KDP enthusiasts!")
61
+
62
+ # Launch the app
63
+ app.launch()