heymenn committed
Commit 882d620 · verified · 1 Parent(s): 6c9af9b

Update app.py

Files changed (1):
  1. app.py +261 -0

app.py CHANGED
@@ -28,3 +28,264 @@ async def process(data: InputData):
 async def process(constraints: InputConstraints):
     result = process_input(constraints, global_tech, global_tech_embeddings)
     return {"technologies": result}
+
+import gradio as gr
+import pandas as pd
+import numpy as np
+import random
+import json
+
+# --- Dummy Implementations for src.services.utils and src.services.processor ---
+# These functions simulate the behavior of your actual services for the Gradio interface.
+
+def load_technologies():
+    """
+    Dummy function to simulate loading technologies and their embeddings.
+    Returns a sample DataFrame and a dummy numpy array for embeddings.
+    """
+    tech_data = {
+        'id': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+        'name': [
+            'Machine Learning', 'Cloud Computing', 'Blockchain', 'Cybersecurity',
+            'Data Analytics', 'Artificial Intelligence', 'DevOps', 'Quantum Computing',
+            'Edge Computing', 'Robotics'
+        ],
+        'description': [
+            'Algorithms for learning from data.', 'On-demand computing resources.',
+            'Decentralized ledger technology.', 'Protecting systems from threats.',
+            'Analyzing large datasets.', 'Simulating human intelligence.',
+            'Software development and operations.', 'Utilizing quantum mechanics.',
+            'Processing data near the source.', 'Automated machines.'
+        ]
+    }
+    global_tech_df = pd.DataFrame(tech_data)
+    # Simulate embeddings as random vectors
+    global_tech_embeddings_array = np.random.rand(len(global_tech_df), 128)
+    return global_tech_df, global_tech_embeddings_array
+
+def set_prompt(problem_description: str) -> str:
+    """
+    Dummy function to simulate prompt generation.
+    """
+    return f"Based on the problem: '{problem_description}', what are the key technical challenges and requirements?"
+
+def retrieve_constraints(prompt: str) -> list[str]:
+    """
+    Dummy function to simulate constraint retrieval.
+    Returns a few sample constraints based on the prompt.
+    """
+    if "security" in prompt.lower() or "secure" in prompt.lower():
+        return ["high security", "data privacy", "authentication"]
+    elif "performance" in prompt.lower() or "speed" in prompt.lower():
+        return ["low latency", "high throughput", "scalability"]
+    elif "data" in prompt.lower() or "analyze" in prompt.lower():
+        return ["data integration", "real-time analytics", "data storage"]
+    return ["cost-efficiency", "ease of integration", "maintainability", "scalability"]
+
+def stem(text_list: list[str], type_of_text: str) -> list[str]:
+    """
+    Dummy function to simulate stemming.
+    Simplistically removes 'ing', 's', 'es' from words.
+    """
+    stemmed_list = []
+    for text in text_list:
+        words = text.split()
+        stemmed_words = []
+        for word in words:
+            word = word.lower()
+            if word.endswith("ing"):
+                word = word[:-3]
+            elif word.endswith("es"):
+                word = word[:-2]
+            elif word.endswith("s"):
+                word = word[:-1]
+            stemmed_words.append(word)
+        stemmed_list.append(" ".join(stemmed_words))
+    return stemmed_list
+
+def save_dataframe(df: pd.DataFrame, filename: str):
+    """
+    Dummy function to simulate saving a DataFrame.
+    """
+    print(f"Simulating saving DataFrame to {filename}")
+    # In a real scenario, you might save to Excel: df.to_excel(filename, index=False)
+
+def save_to_pickle(data):
+    """
+    Dummy function to simulate saving data to a pickle file.
+    """
+    print(f"Simulating saving data to pickle: {type(data)}")
+
+def get_contrastive_similarities(constraints_stemmed: list[str], global_tech_df: pd.DataFrame, global_tech_embeddings: np.ndarray):
+    """
+    Dummy function to simulate getting contrastive similarities.
+    Returns a dummy similarity matrix and result similarities.
+    """
+    num_constraints = len(constraints_stemmed)
+    num_tech = len(global_tech_df)
+
+    # Simulate a similarity matrix
+    # Each row corresponds to a constraint, each column to a technology
+    matrix = np.random.rand(num_constraints, num_tech)
+    matrix = np.round(matrix, 3)  # Round for better display
+
+    # Simulate result_similarities (e.g., top 3 technologies for each constraint)
+    result_similarities = {}
+    for i, constraint in enumerate(constraints_stemmed):
+        # Get top 3 tech indices for this constraint
+        top_tech_indices = np.argsort(matrix[i])[::-1][:3]
+        top_tech_names = [global_tech_df.iloc[idx]['name'] for idx in top_tech_indices]
+        top_tech_scores = [matrix[i, idx] for idx in top_tech_indices]
+        result_similarities[constraint] = list(zip(top_tech_names, top_tech_scores))
+
+    return result_similarities, matrix
+
+def find_best_list_combinations(constraints_stemmed: list[str], global_tech_df: pd.DataFrame, matrix: np.ndarray) -> list[dict]:
+    """
+    Dummy function to simulate finding best list combinations.
+    Returns a few dummy combinations of technologies.
+    """
+    best_combinations = []
+    # Simulate finding combinations that best cover constraints
+    for i in range(min(3, len(constraints_stemmed))):  # Create up to 3 dummy combinations
+        combination = {
+            "technologies": [],
+            "score": round(random.uniform(0.7, 0.95), 2),
+            "covered_constraints": []
+        }
+        num_tech_in_combo = random.randint(2, 4)
+        selected_tech_ids = random.sample(global_tech_df['id'].tolist(), num_tech_in_combo)
+        for tech_id in selected_tech_ids:
+            tech_name = global_tech_df[global_tech_df['id'] == tech_id]['name'].iloc[0]
+            combination["technologies"].append({"id": tech_id, "name": tech_name})
+
+        # Assign some random constraints to be covered
+        num_covered_constraints = random.randint(1, len(constraints_stemmed))
+        combination["covered_constraints"] = random.sample(constraints_stemmed, num_covered_constraints)
+
+        best_combinations.append(combination)
+    return best_combinations
+
+def select_technologies(best_combinations: list[dict]) -> list[int]:
+    """
+    Dummy function to simulate selecting technologies based on best combinations.
+    Returns a list of unique technology IDs.
+    """
+    selected_ids = set()
+    for combo in best_combinations:
+        for tech in combo["technologies"]:
+            selected_ids.add(tech["id"])
+    return list(selected_ids)
+
+def get_technologies_by_id(tech_ids: list[int], global_tech_df: pd.DataFrame) -> list[dict]:
+    """
+    Dummy function to simulate retrieving technology details by ID.
+    """
+    selected_technologies = []
+    for tech_id in tech_ids:
+        tech_info = global_tech_df[global_tech_df['id'] == tech_id]
+        if not tech_info.empty:
+            selected_technologies.append(tech_info.iloc[0].to_dict())
+    return selected_technologies
+
+# --- Core Logic (Modified for Gradio Interface) ---
+
+# Load global technologies and embeddings once when the app starts
+global_tech_df, global_tech_embeddings_array = load_technologies()
+
+def process_input_gradio(problem_description: str):
+    """
+    Processes the input problem description step-by-step for Gradio.
+    Returns all intermediate results.
+    """
+    # Step 1: Set Prompt
+    prompt = set_prompt(problem_description)
+
+    # Step 2: Retrieve Constraints
+    constraints = retrieve_constraints(prompt)
+
+    # Step 3: Stem Constraints
+    constraints_stemmed = stem(constraints, "constraints")
+    save_dataframe(pd.DataFrame({"stemmed_constraints": constraints_stemmed}), "constraints_stemmed.xlsx")
+
+    # Step 4: Global Tech (already loaded, just acknowledge)
+    # save_dataframe(global_tech_df, "global_tech.xlsx")  # This is already done implicitly by loading
+
+    # Step 5: Get Contrastive Similarities
+    result_similarities, matrix = get_contrastive_similarities(
+        constraints_stemmed, global_tech_df, global_tech_embeddings_array
+    )
+    save_to_pickle(result_similarities)
+
+    # Step 6: Find Best List Combinations
+    best_combinations = find_best_list_combinations(constraints_stemmed, global_tech_df, matrix)
+
+    # Step 7: Select Technologies
+    best_technologies_id = select_technologies(best_combinations)
+
+    # Step 8: Get Technologies by ID
+    best_technologies = get_technologies_by_id(best_technologies_id, global_tech_df)
+
+    # Format outputs for Gradio
+    # Convert numpy array to list of lists for better Gradio display
+    matrix_display = matrix.tolist()
+
+    # Convert result_similarities to a more readable format for Gradio
+    result_similarities_display = {
+        k: ", ".join([f"{name} ({score:.3f})" for name, score in v])
+        for k, v in result_similarities.items()
+    }
+
+    best_combinations_display = json.dumps(best_combinations, indent=2)
+    best_technologies_display = json.dumps(best_technologies, indent=2)
+
+    return (
+        prompt,
+        ", ".join(constraints),
+        ", ".join(constraints_stemmed),
+        "Global technologies loaded and ready.",  # Acknowledge tech loading
+        str(result_similarities_display),  # Convert dict to string for display
+        pd.DataFrame(matrix_display, index=constraints_stemmed, columns=global_tech_df['name']),  # Display matrix as DataFrame
+        best_combinations_display,
+        ", ".join(map(str, best_technologies_id)),
+        best_technologies_display
+    )
+
+# --- Gradio Interface Setup ---
+
+# Define the input and output components
+input_problem = gr.Textbox(
+    label="Enter Problem Description",
+    placeholder="e.g., Develop a secure and scalable e-commerce platform with real-time analytics."
+)
+
+output_prompt = gr.Textbox(label="1. Generated Prompt", interactive=False)
+output_constraints = gr.Textbox(label="2. Retrieved Constraints", interactive=False)
+output_stemmed_constraints = gr.Textbox(label="3. Stemmed Constraints", interactive=False)
+output_tech_loaded = gr.Textbox(label="4. Global Technologies Status", interactive=False)
+output_similarities = gr.Textbox(label="5. Result Similarities (Constraint -> Top Technologies)", interactive=False)
+output_matrix = gr.Dataframe(label="6. Similarity Matrix (Constraints vs. Technologies)", interactive=False)
+output_best_combinations = gr.JSON(label="7. Best Technology Combinations Found")
+output_selected_ids = gr.Textbox(label="8. Selected Technology IDs", interactive=False)
+output_final_technologies = gr.JSON(label="9. Final Best Technologies")
+
+# Create the Gradio Interface
+gr.Interface(
+    fn=process_input_gradio,
+    inputs=input_problem,
+    outputs=[
+        output_prompt,
+        output_constraints,
+        output_stemmed_constraints,
+        output_tech_loaded,
+        output_similarities,
+        output_matrix,
+        output_best_combinations,
+        output_selected_ids,
+        output_final_technologies
+    ],
+    title="Insight Finder: Step-by-Step Technology Selection",
+    description="Enter a problem description to see how relevant technologies are identified through various processing steps.",
+    allow_flagging="never"
+).launch()
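
For a quick local sanity check of the appended pipeline, process_input_gradio can be called directly. The snippet below is a minimal sketch, not part of this commit; it assumes it is pasted into app.py just above the gr.Interface(...).launch() call, and the example problem string is arbitrary.

# Minimal sketch (not part of this commit): run the pipeline once and print a few of its nine outputs.
outputs = process_input_gradio(
    "Develop a secure and scalable e-commerce platform with real-time analytics."
)
prompt, constraints, stemmed, status, similarities, matrix_df, combos, selected_ids, final = outputs
print(prompt)        # generated prompt string from set_prompt
print(constraints)   # "high security, data privacy, authentication" for this wording
print(selected_ids)  # comma-separated technology IDs chosen from the dummy combinations

The same interface could also be exercised over HTTP with the gradio_client package once the app is running, though that is not shown here.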