zyad-se committed on
Commit
81f9934
·
verified ·
1 Parent(s): 1c8b3d4

Upload 5 files

Browse files
Files changed (5) hide show
  1. .gitignore +42 -0
  2. README.md +9 -0
  3. app.py +175 -0
  4. prompt_enhancer.py +314 -0
  5. requirements.txt +62 -0
.gitignore ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ env/
8
+ build/
9
+ develop-eggs/
10
+ dist/
11
+ downloads/
12
+ eggs/
13
+ .eggs/
14
+ lib/
15
+ lib64/
16
+ parts/
17
+ sdist/
18
+ var/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual Environment
24
+ venv/
25
+ ENV/
26
+ env/
27
+
28
+ # Environment variables
29
+ .env
30
+
31
+ # IDE files
32
+ .idea/
33
+ .vscode/
34
+ *.swp
35
+ *.swo
36
+
37
+ # OS files
38
+ .DS_Store
39
+ Thumbs.db
40
+
41
+ # Logs
42
+ *.log
README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ title: Advanced Prompt Generator
2
+ emoji: 🚀
3
+ colorFrom: indigo
4
+ colorTo: purple
5
+ sdk: gradio
6
+ sdk_version: 4.13.0
7
+ app_file: app.py
8
+ pinned: false
9
+ license: mit
app.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import asyncio
import os
import time

# Third party
import gradio as gr
from dotenv import load_dotenv

# Local
from prompt_enhancer import PromptEnhancer, get_available_models

# Pull variables from a local .env file (harmless no-op when absent).
load_dotenv(encoding='utf-8')

# Hugging Face Spaces injects SPACE_ID into the process environment.
IS_HF_SPACE = os.environ.get("SPACE_ID") is not None

# Resolve the OpenRouter credential. Spaces secrets and the local .env file
# both surface through the environment, so the lookup is the same either way.
api_key = (
    os.environ.get("OPENROUTER_API_KEY")
    if IS_HF_SPACE
    else os.getenv("OPENROUTER_API_KEY")
)

if not api_key:
    print("Warning: OPENROUTER_API_KEY not found!")

# Module-level cache of the model list fetched from OpenRouter.
available_models = []
28
async def fetch_models():
    """Fetch available models from OpenRouter"""
    global available_models
    try:
        fetched = await get_available_models()
        available_models = fetched
        # Build "id - name" display labels for the dropdown.
        labels = []
        for entry in fetched:
            labels.append(f"{entry['id']} - {entry.get('name', 'No name')}")
        return labels
    except Exception as e:
        print(f"Error fetching models: {e}")
        # Static fallback so the UI still works when the API call fails.
        fallback = [
            "anthropic/claude-3-haiku - Claude 3 Haiku",
            "anthropic/claude-3-sonnet - Claude 3 Sonnet",
            "anthropic/claude-3-opus - Claude 3 Opus",
            "openai/gpt-4o - GPT-4o",
            "openai/gpt-4o-mini - GPT-4o Mini"
        ]
        return fallback
46
def get_model_id(model_display_name):
    """Extract model ID from display name"""
    # Display names look like "provider/model - Friendly Name"; keep the
    # part before the first " - " separator, or the whole string if absent.
    head, sep, _rest = model_display_name.partition(" - ")
    return head if sep else model_display_name
52
async def enhance_prompt(prompt, model_choice):
    """Enhance the prompt using the selected model"""
    # Guard: nothing to do for a blank prompt.
    if not prompt.strip():
        return "Please enter a prompt to enhance.", "", ""

    t0 = time.time()

    chosen_model = get_model_id(model_choice)
    enhancer = PromptEnhancer(chosen_model)

    try:
        # Run the three enhancement stages.
        expanded_prompt = await enhancer.analyze_and_expand_input(prompt)
        enhancements = await enhancer.suggest_enhancements(prompt)
        decomposition = await enhancer.decompose_and_add_reasoning(expanded_prompt)

        # NOTE: the "decomposition_and_reasoninng" key spelling is the
        # contract expected by PromptEnhancer.assemble_prompt — keep as-is.
        components = {
            "expanded_prompt": expanded_prompt,
            "decomposition_and_reasoninng": decomposition,
            "suggested_enhancements": enhancements
        }

        final_prompt = await enhancer.assemble_prompt(components)

        elapsed = time.time() - t0

        # Human-readable processing summary for the Stats tab.
        report = f"""
        Model: {chosen_model}
        Processing Time: {elapsed:.2f} seconds
        Prompt Tokens: {enhancer.prompt_tokens}
        Completion Tokens: {enhancer.completion_tokens}
        """

        return final_prompt, expanded_prompt, report
    except Exception as e:
        return f"Error: {str(e)}", "", ""
91
# Bridge helper: Gradio event handlers are synchronous callables, so async
# functions are adapted by driving them to completion with asyncio.run.
def run_async(fn):
    def _sync_call(*args, **kwargs):
        coroutine = fn(*args, **kwargs)
        return asyncio.run(coroutine)
    return _sync_call
97
# Create the Gradio interface
async def create_ui():
    """Build and return the Gradio Blocks application.

    Fetches the model list up front so the dropdown is populated before the
    UI renders; falls back to a hard-coded default when the list is empty.
    """
    # Get initial model list
    model_choices = await fetch_models()
    default_model = model_choices[0] if model_choices else "anthropic/claude-3-haiku - Claude 3 Haiku"

    with gr.Blocks(title="Advanced Prompt Generator", theme=gr.themes.Soft()) as app:
        gr.Markdown("""
        # 🚀 Advanced Prompt Generator

        Transform your basic prompts into highly optimized, structured prompts for better AI responses.

        ## How it works:
        1. Enter your basic prompt
        2. Select an AI model
        3. Get an enhanced, structured prompt with decomposition and reasoning
        """)

        with gr.Row():
            # Left column: user inputs and action buttons.
            with gr.Column(scale=3):
                prompt_input = gr.Textbox(
                    label="Enter Your Basic Prompt",
                    placeholder="E.g. Explain quantum computing",
                    lines=4
                )
                model_dropdown = gr.Dropdown(
                    choices=model_choices,
                    label="Select Model",
                    value=default_model
                )
                refresh_button = gr.Button("🔄 Refresh Models")

                with gr.Row():
                    submit_button = gr.Button("🔮 Enhance Prompt", variant="primary")
                    clear_button = gr.Button("🧹 Clear")

            # Right column: tabbed outputs (final prompt, intermediate, stats).
            with gr.Column(scale=4):
                with gr.Tabs():
                    with gr.TabItem("Enhanced Prompt"):
                        enhanced_output = gr.Textbox(
                            label="Enhanced Prompt",
                            placeholder="Your enhanced prompt will appear here...",
                            lines=15
                        )
                    with gr.TabItem("Expanded Prompt Only"):
                        expanded_output = gr.Textbox(
                            label="Expanded Prompt",
                            placeholder="Your expanded prompt will appear here...",
                            lines=15
                        )
                    with gr.TabItem("Stats"):
                        stats_output = gr.Textbox(
                            label="Processing Stats",
                            lines=5
                        )

        # Define event handlers
        # NOTE(review): fetch_models returns a plain list; Gradio may expect
        # gr.update(choices=...) to repopulate a Dropdown — verify on the
        # installed gradio version.
        refresh_button.click(
            fn=run_async(fetch_models),
            outputs=model_dropdown
        )

        submit_button.click(
            fn=run_async(enhance_prompt),
            inputs=[prompt_input, model_dropdown],
            outputs=[enhanced_output, expanded_output, stats_output]
        )

        # Clear resets all three output boxes to empty strings.
        clear_button.click(
            fn=lambda: ("", "", ""),
            outputs=[enhanced_output, expanded_output, stats_output]
        )

    return app
172
# Script entry point: build the UI asynchronously, then start the server.
if __name__ == "__main__":
    demo = asyncio.run(create_ui())
    demo.launch(debug=True)
prompt_enhancer.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import asyncio
import json
import os
import time

# Third party
import aiohttp
from dotenv import load_dotenv

# Pull variables from a local .env file (harmless no-op when absent).
load_dotenv(encoding='utf-8')

# Hugging Face Spaces injects SPACE_ID into the process environment.
IS_HF_SPACE = os.environ.get("SPACE_ID") is not None
14
async def get_available_models():
    """Get a list of available models from OpenRouter.

    Returns:
        list[dict]: The ``data`` payload of the OpenRouter ``/models``
        endpoint; each entry carries at least an ``id`` field.

    Raises:
        RuntimeError: If the API key is missing from the environment or the
        API responds with a non-200 status. (RuntimeError subclasses
        Exception, so existing ``except Exception`` callers still work.)
    """
    # The key is surfaced through the environment both locally (.env) and on
    # HF Spaces (secrets); os.environ.get and os.getenv are equivalent, so the
    # previous IS_HF_SPACE branch was redundant.
    api_key = os.environ.get("OPENROUTER_API_KEY")
    if not api_key:
        raise RuntimeError("OPENROUTER_API_KEY not found in environment variables")

    url = "https://openrouter.ai/api/v1/models"

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as response:
            if response.status != 200:
                error_text = await response.text()
                raise RuntimeError(f"OpenRouter API error: {response.status}, {error_text}")

            data = await response.json()
            return data["data"]
41
# Defining the PromptEnhancer class
class PromptEnhancer:
    """Multi-stage prompt-engineering pipeline backed by the OpenRouter API.

    Each stage (expansion, decomposition/reasoning, enhancement suggestion)
    is a separate LLM call; token usage reported by the API is accumulated
    across calls in ``prompt_tokens`` and ``completion_tokens``.
    """

    def __init__(self, model="anthropic/claude-3-haiku", tools_dict=None):
        """Create an enhancer bound to one OpenRouter model.

        Args:
            model: OpenRouter model identifier to use for every LLM call.
            tools_dict: Optional mapping of tool names to descriptions used
                by :meth:`suggest_enhancements`. Defaults to an empty dict.
                (Previously a mutable ``{}`` default shared across all
                instances — a classic Python pitfall, now fixed with a
                ``None`` sentinel.)
        """
        self.model = model
        self.prompt_tokens = 0       # cumulative prompt-side tokens
        self.completion_tokens = 0   # cumulative completion-side tokens
        self.tools_dict = {} if tools_dict is None else tools_dict

        # Get API key based on environment (Spaces secret or local .env —
        # both surface through the process environment).
        if IS_HF_SPACE:
            self.api_key = os.environ.get("OPENROUTER_API_KEY")
        else:
            self.api_key = os.getenv("OPENROUTER_API_KEY")

        self.base_url = "https://openrouter.ai/api/v1"

        if not self.api_key:
            print("Error: API Key is not loaded!")
        else:
            # NOTE(review): logging even a 5-char key prefix leaks partial
            # credentials to stdout; consider removing in production.
            print(f"API Key Loaded: {self.api_key[:5]}********")

    async def call_llm(self, prompt):
        """Call the LLM with the given prompt using OpenRouter.

        Sends a fixed system message plus ``prompt`` as the user turn,
        accumulates token usage from the response, and returns the first
        choice's message content.

        Raises:
            RuntimeError: On any non-200 response from the API.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            # OpenRouter uses the referer header for request attribution.
            "HTTP-Referer": "https://huggingface.co" if IS_HF_SPACE else "http://localhost:3000"
        }

        data = {
            "model": self.model,
            "messages": [
                {"role": "system",
                 "content":
                 "You are a highly intelligent AI assistant. Your task is to analyze, and comprehend the provided prompt,\
                then provide clear, and concise response based strictly on the given instructions.\
                Do not include any additional explanations or context beyond the required output."
                 },
                {"role": "user",
                 "content": prompt
                 }
            ],
            "temperature": 0.0,  # from 0 (precise) to 2 (creative)
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(f"{self.base_url}/chat/completions", headers=headers, json=data) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise RuntimeError(f"OpenRouter API error: {response.status}, {error_text}")

                response_data = await response.json()

                # Update token counts if available in the response
                if "usage" in response_data:
                    self.prompt_tokens += response_data["usage"].get("prompt_tokens", 0)
                    self.completion_tokens += response_data["usage"].get("completion_tokens", 0)

                return response_data["choices"][0]["message"]["content"]

    async def analyze_and_expand_input(self, input_prompt):
        """Expand ``input_prompt`` into a richer, structured prompt.

        Uses a two-shot meta-prompt that asks the model to analyze goal,
        persona, length, format, requirements and improvements, then emit
        only the reformulated prompt text.
        """
        analysis_and_expansion_prompt = f"""
        You are a highly intelligent assistant.
        Analyze the provided {{prompt}} and generate concise answers for the following key aspects:

        - **Main goal of the prompt:** Identify the core subject or request within the provided prompt.
        - **Persona:** Recommend the most relevant persona for the AI model to adopt (e.g., expert, teacher, conversational, etc.)
        - **Optimal output length:** Suggest an optimal output length (short, brief, medium, long) based on the task, and give an approximate number of words if it is suitable for the case.
        - **Most convenient output format:** Recommend the optimal format for the result (e.g., list, paragraph, code snippet, table, JSON, etc.).
        - **Specific requirements:** Highlight any special conditions, rules, or expectations stated or implied within the prompt.
        - **Suggested improvements:** Offer recommendations on how to modify or enhance the prompt for more precise or efficient output generation.
        - **One-shot prompting:** Create one related examples to guide the output generation.

        Then use them to reformulate and expand the provided {{prompt}}.
        Return the expanded prompt as output in text format. Refrain from explaining the generation process.

        Example 1:
        {{prompt}}: "Explain quantum entanglement to a 10-year-old."

        *thought_process*:
        - **Main goal of the prompt:** Simplify complex quantum physics concept for children.
        - **Persona:** Patient, friendly teacher
        - **Optimal output length:** Brief (100-150 words)
        - **Most convenient output format:** Narrative with analogy
        - **Specific requirements:** Age-appropriate explanation (10-year-old).
        - **Suggested improvements:**
        - Request specific analogies
        - Include interactive elements
        - Add follow-up questions
        - Suggest visual aids
        - **One-shot prompting:**
        Output example:
        "Imagine you have two special pairs of socks. When you put one sock in your room and the other sock in the kitchen,
        something magical happens! Whatever happens to one sock instantly affects the other sock.
        If you turn one sock inside out, the other sock automatically turns inside out too, no matter how far apart they are!"

        *output*:
        As a friendly science teacher, please explain quantum entanglement to a 10-year-old student using these guidelines:

        Start with a relatable analogy using everyday objects
        Use simple, clear language avoiding technical terms
        Include 2-3 interactive examples that demonstrate the concept
        Add fun facts that will spark curiosity
        End with simple questions to check understanding
        Keep the explanation brief (100-150 words)

        Structure your explanation as:

        Opening analogy
        Main explanation with examples
        Interactive "What if?" scenarios
        Fun facts about quantum entanglement
        Check-for-understanding questions

        Remember to maintain an enthusiastic and encouraging tone throughout the explanation.

        Output example:
        Imagine you have two special pairs of socks. When you put one sock in your room and the other sock in the kitchen,
        something magical happens! Whatever happens to one sock instantly affects the other sock.
        If you turn one sock inside out, the other sock automatically turns inside out too, no matter how far apart they are!

        Example 2:
        {{prompt}}: "Write a function to calculate the Fibonacci sequence up to n terms."

        *thought_process*:
        - **Main goal of the prompt:** Create a programming function that generates Fibonacci numbers
        - **Persona:** Programming expert
        - **Optimal output length:** Medium (150-200 words including code)
        - **Most convenient output format:** Code snippet with explanatory comments
        - **Specific requirements:** Function must accept parameter n for sequence length
        - **Suggested improvements:**
        - Specify programming language
        - Clarify if 0 should be included as first term
        - Define expected handling of negative inputs
        - **One-shot prompting:**

        *output*:
        As an expert programmer, please create a well-documented function to generate the Fibonacci sequence.

        Requirements:
        Accept a parameter 'n' specifying the number of terms to generate
        Handle edge cases (n <= 0, n == 1)
        Return the sequence as a list/array
        Include proper error handling
        Add comments explaining the logic

        Provide the implementation in Python, including:
        Function definition with docstring
        Input validation
        Core algorithm
        Example usage with outputs for n=5, n=1, and n=0

        For reference, the sequence should start with [0, 1, ...] where each subsequent number is the sum of the previous two numbers.


        Now, analyze the following prompt then return only the generated *output*:
        {{prompt}}: {input_prompt}
        """

        return await self.call_llm(analysis_and_expansion_prompt)

    async def decompose_and_add_reasoning(self, expanded_prompt):
        """Break ``expanded_prompt`` into subtasks with reasoning and
        success criteria, using a one-shot meta-prompt."""
        decomposition_and_reasoning_prompt = f"""
        You are a highly capable AI assistant tasked with improving complex task execution.
        Analyze the provided {{prompt}}, and use it to generate the following output:

        - **Subtasks decomposition:** Break down the task described in the prompt into manageable and specific subtasks that the AI model needs to address.
        - **Chain-of-thought reasoning:** For subtasks that involve critical thinking or complex steps, add reasoning using a step-by-step approach to improve decision-making and output quality.
        - **Success criteria:** Define what constitutes a successful completion for each subtask, ensuring clear guidance for expected results.

        Return the following structured output for each subtask:

        1. **Subtask description**: Describe a specific subtask.
        2. **Reasoning**: Provide reasoning or explanation for why this subtask is essential or how it should be approached.
        3. **Success criteria**: Define what successful completion looks like for this subtask.

        Example 1:
        {{Prompt}}: "Explain how machine learning models are evaluated using cross-validation."

        ##THOUGHT PROCESS##
        *Subtask 1*:
        - **Description**: Define cross-validation and its purpose.
        - **Reasoning**: Clarifying the concept ensures the reader understands the basic mechanism behind model evaluation.
        - **Success criteria**: The explanation should include a clear definition of cross-validation and its role in assessing model performance.
        *Subtask 2*:
        - **Description**: Describe how cross-validation splits data into training and validation sets.
        - **Reasoning**: Explaining the split is crucial to understanding how models are validated and tested for generalization.
        - **Success criteria**: A proper explanation of k-fold cross-validation with an illustration of how data is split.
        *Subtask 3*:
        - **Description**: Discuss how cross-validation results are averaged to provide a final evaluation metric.
        - **Reasoning**: Averaging results helps mitigate the variance in performance due to different training/validation splits.
        - **Success criteria**: The output should clearly explain how the final model evaluation is derived from multiple iterations of cross-validation.

        Now, analyze the following expanded prompt and return the subtasks, reasoning, and success criteria.
        Prompt: {expanded_prompt}
        """
        return await self.call_llm(decomposition_and_reasoning_prompt)

    async def suggest_enhancements(self, input_prompt, tools_dict=None):
        """Suggest references/tools that could improve task execution.

        Args:
            input_prompt: The original user prompt to analyze.
            tools_dict: Optional mapping of available tools; defaults to an
                empty dict (mutable-default bug fixed with ``None`` sentinel;
                the rendered template is unchanged because ``{}`` is
                substituted before interpolation).
        """
        tools_dict = {} if tools_dict is None else tools_dict
        enhancement_suggestion_prompt = f"""
        You are a highly intelligent assistant specialized in reference suggestion and tool integration.
        Analyze the provided {{input_prompt}} and the available {{tools_dict}} to recommend enhancements:

        - **Reference necessity:** Determine if additional reference materials would benefit the task execution (e.g., websites, documentations, books, articles, etc.)
        - **Tool applicability:** Evaluate if any available tools could enhance efficiency or accuracy
        - **Integration complexity:** Assess the effort required to incorporate suggested resources
        - **Expected impact:** Estimate the potential improvement in output quality

        If enhancements are warranted, provide structured recommendations in this format:

        ##REFERENCE SUGGESTIONS##
        (Only if applicable, maximum 3)
        - Reference name/type
        - Purpose: How it enhances the output
        - Integration: How to incorporate it

        ##TOOL SUGGESTIONS##
        (Only if applicable, maximum 3)
        - Tool name from tools_dict
        - Purpose: How it improves the task
        - Integration: How to implement it

        If no enhancements would significantly improve the output, return an empty string ""

        Example 1:
        {{input_prompt}}: "Write a Python function to detect faces in images using computer vision."
        {{tools_dict}}: {{}}
        *output*:
        ##REFERENCE SUGGESTIONS##
        - OpenCV Face Detection Documentation
        Purpose: Provides implementation details and best practices
        Integration: Reference for optimal parameter settings and cascade classifier usage

        Now, analyze the following prompt and tools, then return only the generated *output*:
        {{input_prompt}}: {input_prompt}
        {{tools_dict}}: {tools_dict}
        """
        return await self.call_llm(enhancement_suggestion_prompt)

    async def assemble_prompt(self, components):
        """Join the pipeline outputs into the final prompt.

        Order is: expanded prompt, suggested enhancements, decomposition.
        Accepts both the historical misspelled key
        ``"decomposition_and_reasoninng"`` (used by existing callers) and
        the corrected spelling ``"decomposition_and_reasoning"`` — backward
        compatible either way.
        """
        expanded_prompt = components.get("expanded_prompt", "")
        decomposition = components.get(
            "decomposition_and_reasoninng",
            components.get("decomposition_and_reasoning", ""),
        )
        suggested_enhancements = components.get("suggested_enhancements", "")

        output_prompt = (
            f"{expanded_prompt}\n\n"
            f"{suggested_enhancements}\n\n"
            f"{decomposition}"
        )
        return output_prompt

    async def enhance_prompt(self, input_prompt):
        """Main method to enhance a prompt.

        Runs the three stages sequentially and assembles their outputs;
        returns the final combined prompt string.
        """
        tools_dict = {}

        expanded_prompt = await self.analyze_and_expand_input(input_prompt)
        suggested_enhancements = await self.suggest_enhancements(input_prompt, tools_dict)
        decomposition_and_reasoning = await self.decompose_and_add_reasoning(expanded_prompt)

        components = {
            "expanded_prompt": expanded_prompt,
            "decomposition_and_reasoninng": decomposition_and_reasoning,
            "suggested_enhancements": suggested_enhancements
        }

        output_prompt = await self.assemble_prompt(components)

        return output_prompt
requirements.txt ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiohttp==3.9.5
2
+ aiosignal==1.3.1
3
+ annotated-types==0.7.0
4
+ anyio==4.4.0
5
+ attrs==23.2.0
6
+ certifi==2024.7.4
7
+ charset-normalizer==3.3.2
8
+ click==8.1.7
9
+ distro==1.9.0
10
+ dnspython==2.6.1
11
+ email_validator==2.2.0
12
+ fastapi==0.111.1
13
+ fastapi-cli==0.0.4
14
+ frozenlist==1.4.1
15
+ h11==0.14.0
16
+ httpcore==1.0.5
17
+ httptools==0.6.1
18
+ httpx==0.27.0
19
+ idna==3.7
20
+ Jinja2==3.1.4
21
+ jsonpatch==1.33
22
+ jsonpointer==3.0.0
23
+ langchain==0.2.10
24
+ langchain-core==0.2.22
25
+ langchain-openai==0.1.17
26
+ langchain-text-splitters==0.2.2
27
+ langsmith==0.1.93
28
+ markdown-it-py==3.0.0
29
+ MarkupSafe==2.1.5
30
+ mdurl==0.1.2
31
+ multidict==6.0.5
32
+ numpy==1.26.4
33
+ openai==1.35.15
34
+ orjson==3.10.6
35
+ packaging==24.1
36
+ pydantic==2.8.2
37
+ pydantic_core==2.20.1
38
+ Pygments==2.18.0
39
+ python-dotenv==1.0.1
40
+ python-multipart==0.0.9
41
+ PyYAML==6.0.1
42
+ regex==2024.5.15
43
+ requests==2.32.3
44
+ rich==13.7.1
45
+ shellingham==1.5.4
46
+ sniffio==1.3.1
47
+ SQLAlchemy==2.0.31
48
+ starlette==0.37.2
49
+ tenacity==8.5.0
50
+ tiktoken==0.7.0
51
+ tqdm==4.66.4
52
+ typer==0.12.3
53
+ typing_extensions==4.12.2
54
+ urllib3==2.2.2
55
+ uvicorn==0.30.3
56
+ uvloop==0.19.0
57
+ watchfiles==0.22.0
58
+ websockets==12.0
59
+ yarl==1.9.4
60
+ gradio>=4.13.0