heymenn committed
Commit 22e1752 · verified · 1 Parent(s): 69d06c3

Update src/core.py

Files changed (1): src/core.py (+263 −1)
src/core.py CHANGED
@@ -49,4 +49,266 @@ def process_input_from_constraints(constraints, global_tech, global_tech_embeddi
     best_technologies_id = select_technologies(best_combinations)
     best_technologies = get_technologies_by_id(best_technologies_id,global_tech)
 
-    return best_technologies
+    return best_technologies
+
+
+import gradio as gr
+import pandas as pd
+import numpy as np
+import random
+import json
+
+# --- Dummy Implementations for src.services.utils and src.services.processor ---
+# These functions simulate the behavior of your actual services for the Gradio interface.
+
+def load_technologies():
+    """
+    Dummy function to simulate loading technologies and their embeddings.
+    Returns a sample DataFrame and a dummy numpy array for embeddings.
+    """
+    tech_data = {
+        'id': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+        'name': [
+            'Machine Learning', 'Cloud Computing', 'Blockchain', 'Cybersecurity',
+            'Data Analytics', 'Artificial Intelligence', 'DevOps', 'Quantum Computing',
+            'Edge Computing', 'Robotics'
+        ],
+        'description': [
+            'Algorithms for learning from data.', 'On-demand computing resources.',
+            'Decentralized ledger technology.', 'Protecting systems from threats.',
+            'Analyzing large datasets.', 'Simulating human intelligence.',
+            'Software development and operations.', 'Utilizing quantum mechanics.',
+            'Processing data near the source.', 'Automated machines.'
+        ]
+    }
+    global_tech_df = pd.DataFrame(tech_data)
+    # Simulate embeddings as random vectors
+    global_tech_embeddings_array = np.random.rand(len(global_tech_df), 128)
+    return global_tech_df, global_tech_embeddings_array
+
+def set_prompt(problem_description: str) -> str:
+    """
+    Dummy function to simulate prompt generation.
+    """
+    return f"Based on the problem: '{problem_description}', what are the key technical challenges and requirements?"
+
+def retrieve_constraints(prompt: str) -> list[str]:
+    """
+    Dummy function to simulate constraint retrieval.
+    Returns a few sample constraints based on the prompt.
+    """
+    if "security" in prompt.lower() or "secure" in prompt.lower():
+        return ["high security", "data privacy", "authentication"]
+    elif "performance" in prompt.lower() or "speed" in prompt.lower():
+        return ["low latency", "high throughput", "scalability"]
+    elif "data" in prompt.lower() or "analyze" in prompt.lower():
+        return ["data integration", "real-time analytics", "data storage"]
+    return ["cost-efficiency", "ease of integration", "maintainability", "scalability"]
+
+def stem(text_list: list[str], type_of_text: str) -> list[str]:
+    """
+    Dummy function to simulate stemming.
+    Simplistically removes 'ing', 's', 'es' from words.
+    """
+    stemmed_list = []
+    for text in text_list:
+        words = text.split()
+        stemmed_words = []
+        for word in words:
+            word = word.lower()
+            if word.endswith("ing"):
+                word = word[:-3]
+            elif word.endswith("es"):
+                word = word[:-2]
+            elif word.endswith("s"):
+                word = word[:-1]
+            stemmed_words.append(word)
+        stemmed_list.append(" ".join(stemmed_words))
+    return stemmed_list
+
+def save_dataframe(df: pd.DataFrame, filename: str):
+    """
+    Dummy function to simulate saving a DataFrame.
+    """
+    print(f"Simulating saving DataFrame to {filename}")
+    # In a real scenario, you might save to Excel: df.to_excel(filename, index=False)
+
+def save_to_pickle(data):
+    """
+    Dummy function to simulate saving data to a pickle file.
+    """
+    print(f"Simulating saving data to pickle: {type(data)}")
+
+def get_contrastive_similarities(constraints_stemmed: list[str], global_tech_df: pd.DataFrame, global_tech_embeddings: np.ndarray):
+    """
+    Dummy function to simulate getting contrastive similarities.
+    Returns a dummy similarity matrix and result similarities.
+    """
+    num_constraints = len(constraints_stemmed)
+    num_tech = len(global_tech_df)
+
+    # Simulate a similarity matrix
+    # Each row corresponds to a constraint, each column to a technology
+    matrix = np.random.rand(num_constraints, num_tech)
+    matrix = np.round(matrix, 3)  # Round for better display
+
+    # Simulate result_similarities (e.g., top 3 technologies for each constraint)
+    result_similarities = {}
+    for i, constraint in enumerate(constraints_stemmed):
+        # Get top 3 tech indices for this constraint
+        top_tech_indices = np.argsort(matrix[i])[::-1][:3]
+        top_tech_names = [global_tech_df.iloc[idx]['name'] for idx in top_tech_indices]
+        top_tech_scores = [matrix[i, idx] for idx in top_tech_indices]
+        result_similarities[constraint] = list(zip(top_tech_names, top_tech_scores))
+
+    return result_similarities, matrix
+
+def find_best_list_combinations(constraints_stemmed: list[str], global_tech_df: pd.DataFrame, matrix: np.ndarray) -> list[dict]:
+    """
+    Dummy function to simulate finding best list combinations.
+    Returns a few dummy combinations of technologies.
+    """
+    best_combinations = []
+    # Simulate finding combinations that best cover constraints
+    for i in range(min(3, len(constraints_stemmed))):  # Create up to 3 dummy combinations
+        combination = {
+            "technologies": [],
+            "score": round(random.uniform(0.7, 0.95), 2),
+            "covered_constraints": []
+        }
+        num_tech_in_combo = random.randint(2, 4)
+        selected_tech_ids = random.sample(global_tech_df['id'].tolist(), num_tech_in_combo)
+        for tech_id in selected_tech_ids:
+            tech_name = global_tech_df[global_tech_df['id'] == tech_id]['name'].iloc[0]
+            combination["technologies"].append({"id": tech_id, "name": tech_name})
+
+        # Assign some random constraints to be covered
+        num_covered_constraints = random.randint(1, len(constraints_stemmed))
+        combination["covered_constraints"] = random.sample(constraints_stemmed, num_covered_constraints)
+
+        best_combinations.append(combination)
+    return best_combinations
+
+def select_technologies(best_combinations: list[dict]) -> list[int]:
+    """
+    Dummy function to simulate selecting technologies based on best combinations.
+    Returns a list of unique technology IDs.
+    """
+    selected_ids = set()
+    for combo in best_combinations:
+        for tech in combo["technologies"]:
+            selected_ids.add(tech["id"])
+    return list(selected_ids)
+
+def get_technologies_by_id(tech_ids: list[int], global_tech_df: pd.DataFrame) -> list[dict]:
+    """
+    Dummy function to simulate retrieving technology details by ID.
+    """
+    selected_technologies = []
+    for tech_id in tech_ids:
+        tech_info = global_tech_df[global_tech_df['id'] == tech_id]
+        if not tech_info.empty:
+            selected_technologies.append(tech_info.iloc[0].to_dict())
+    return selected_technologies
+
+# --- Core Logic (Modified for Gradio Interface) ---
+
+# Load global technologies and embeddings once when the app starts
+global_tech_df, global_tech_embeddings_array = load_technologies()
+
+def process_input_gradio(problem_description: str):
+    """
+    Processes the input problem description step-by-step for Gradio.
+    Returns all intermediate results.
+    """
+    # Step 1: Set Prompt
+    prompt = set_prompt(problem_description)
+
+    # Step 2: Retrieve Constraints
+    constraints = retrieve_constraints(prompt)
+
+    # Step 3: Stem Constraints
+    constraints_stemmed = stem(constraints, "constraints")
+    save_dataframe(pd.DataFrame({"stemmed_constraints": constraints_stemmed}), "constraints_stemmed.xlsx")
+
+    # Step 4: Global Tech (already loaded, just acknowledge)
+    # save_dataframe(global_tech_df, "global_tech.xlsx")  # This is already done implicitly by loading
+
+    # Step 5: Get Contrastive Similarities
+    result_similarities, matrix = get_contrastive_similarities(
+        constraints_stemmed, global_tech_df, global_tech_embeddings_array
+    )
+    save_to_pickle(result_similarities)
+
+    # Step 6: Find Best List Combinations
+    best_combinations = find_best_list_combinations(constraints_stemmed, global_tech_df, matrix)
+
+    # Step 7: Select Technologies
+    best_technologies_id = select_technologies(best_combinations)
+
+    # Step 8: Get Technologies by ID
+    best_technologies = get_technologies_by_id(best_technologies_id, global_tech_df)
+
+    # Format outputs for Gradio
+    # Convert numpy array to list of lists for better Gradio display
+    matrix_display = matrix.tolist()
+
+    # Convert result_similarities to a more readable format for Gradio
+    result_similarities_display = {
+        k: ", ".join([f"{name} ({score:.3f})" for name, score in v])
+        for k, v in result_similarities.items()
+    }
+
+    best_combinations_display = json.dumps(best_combinations, indent=2)
+    best_technologies_display = json.dumps(best_technologies, indent=2)
+
+    return (
+        prompt,
+        ", ".join(constraints),
+        ", ".join(constraints_stemmed),
+        "Global technologies loaded and ready.",  # Acknowledge tech loading
+        str(result_similarities_display),  # Convert dict to string for display
+        pd.DataFrame(matrix_display, index=constraints_stemmed, columns=global_tech_df['name']),  # Display matrix as DataFrame
+        best_combinations_display,
+        ", ".join(map(str, best_technologies_id)),
+        best_technologies_display
+    )
+
+# --- Gradio Interface Setup ---
+
+# Define the input and output components
+input_problem = gr.Textbox(
+    label="Enter Problem Description",
+    placeholder="e.g., Develop a secure and scalable e-commerce platform with real-time analytics."
+)
+
+output_prompt = gr.Textbox(label="1. Generated Prompt", interactive=False)
+output_constraints = gr.Textbox(label="2. Retrieved Constraints", interactive=False)
+output_stemmed_constraints = gr.Textbox(label="3. Stemmed Constraints", interactive=False)
+output_tech_loaded = gr.Textbox(label="4. Global Technologies Status", interactive=False)
+output_similarities = gr.Textbox(label="5. Result Similarities (Constraint -> Top Technologies)", interactive=False)
+output_matrix = gr.Dataframe(label="6. Similarity Matrix (Constraints vs. Technologies)", interactive=False)
+output_best_combinations = gr.JSON(label="7. Best Technology Combinations Found")
+output_selected_ids = gr.Textbox(label="8. Selected Technology IDs", interactive=False)
+output_final_technologies = gr.JSON(label="9. Final Best Technologies")
+
+
+# Create the Gradio Interface
+gr.Interface(
+    fn=process_input_gradio,
+    inputs=input_problem,
+    outputs=[
+        output_prompt,
+        output_constraints,
+        output_stemmed_constraints,
+        output_tech_loaded,
+        output_similarities,
+        output_matrix,
+        output_best_combinations,
+        output_selected_ids,
+        output_final_technologies
+    ],
+    title="Insight Finder: Step-by-Step Technology Selection",
+    description="Enter a problem description to see how relevant technologies are identified through various processing steps.",
+    allow_flagging="never"
+).launch()