Delete src/agent_logic.py

src/agent_logic.py  DELETED  (+0 -379)
@@ -1,379 +0,0 @@
import os
import logging
import gradio as gr
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI  # Use this for OpenRouter
# Remove ChatPromptTemplate import as we'll bypass it for system message
from langchain_core.output_parsers import StrOutputParser
from langchain_core.messages import HumanMessage, SystemMessage
# Import HumanMessagePromptTemplate if needed for other parts, but not for this fix
import requests
import tempfile
import json
import re  # Import regex for parsing

logging.basicConfig(level=logging.INFO)

# --- Constants & Prompts ---

# Expanded Gemini Model List based on user feedback
VALID_GEMINI_MODELS = [
    "gemini-1.5-pro-latest",  # Default/Recommended
    "gemini-1.5-flash-latest",
    "gemini-2.5-flash-preview-04-17",  # Added
    "gemini-2.5-pro-preview-03-25",  # Added
    "gemini-2.0-flash",  # Added
    "gemini-2.0-flash-lite",  # Added
]

# Define the *significantly enhanced* prompt template for detailed tasks
# Escape literal curly braces by doubling them {{ and }} - This is kept for clarity but won't be parsed by Langchain in the new approach
system_prompt_base = """You are an expert AI assistant specialized in breaking down software development ideas into **extremely detailed, actionable, step-by-step tasks** for AI-powered coding tools. The user will provide an application idea, a target AI tool, and a desired language (Turkish or English). Your goal is to generate a comprehensive, multi-page plan that the AI tool can follow precisely to build the application.

**Output Requirements:**
1. **Language:** Generate the entire response in the requested language ({language}).
2. **Format:** Use a numbered list for the main steps. Use sub-bullets (multiple levels if necessary) for intricate details within each step.
3. **Detail Level:** Be **extremely specific and verbose**. The output should be **at least two pages long** when rendered normally. Include:
    * **Project Setup:** Detailed commands for project initialization (e.g., `npx create-next-app`, `python -m venv venv`), including specific options to select.
    * **Project Structure:** Define the complete directory and file structure, explaining the purpose of key files/folders.
    * **Libraries/Dependencies:** List all necessary libraries and exact installation commands (e.g., `npm install react react-dom axios` or `pip install flask sqlalchemy flask-cors`).
    * **Code Snippets/Examples:** Provide **concrete example code snippets** for key functions, components, or configurations. Do not just describe; show the code.
    * **Configuration:** Detail necessary configuration files (e.g., `.env`, `tailwind.config.js`, `database.py`) and provide example settings.
    * **Step-by-Step Instructions:** Break down every complex task into the smallest possible, manageable sub-steps. Leave no ambiguity.
    * **AI Tool Prompts:** Suggest **very specific, context-rich prompts** the user could give to the target AI tool for each significant code generation or modification step (e.g., "Cursor, in `src/components/AuthForm.js`, generate a React functional component named `AuthForm`. It should include state variables for `email`, `password`, and `isLoading`. Add input fields for email and password, and a submit button. Implement basic form handling with `onSubmit` that logs the email and password for now. Use Tailwind CSS for styling.").
4. **Logical Flow:** Ensure tasks are sequential, logical, and build upon each other seamlessly.
5. **Completeness:** Cover the entire development lifecycle from setup to basic implementation of core features.

**Target AI Tool Guidance:**
* **Cursor:** Focus on tasks involving code generation, refactoring, debugging within an IDE context, using codebase awareness. Emphasize prompts for generating functions, classes, components, files, and modifying existing code based on context.
* **Windsurf:** Assume tasks related to web development (React/Next.js focus). Include detailed prompts for UI generation, component creation (with props and state), styling (e.g., Tailwind CSS classes), API integration, and data fetching logic.
* **TRAE AI, CLINE, ROOCODE:** Treat as advanced code generation/completion tools. Provide highly detailed instructions for setting up project structure, defining data models/schemas (e.g., SQL table definitions, ORM models), implementing complex core logic (functions, classes, algorithms), creating API endpoints (with request/response examples), generating UI components, and writing unit/integration tests. Specify extremely detailed prompts for each generation step.
* **Genel (General):** Provide a comprehensive software development plan covering requirements analysis, detailed architecture design (including technology stack choices, database schema design), meticulous implementation steps for all major features, testing strategy (unit, integration, e2e), and deployment considerations. Maintain an exceptionally high level of detail suitable for guiding a development team.

**Example Task Structure (Illustrative - More Detail Needed in Actual Output):**
1. **Project Setup (Next.js Blog):**
    * Open your terminal.
    * Navigate to your desired projects directory.
    * Run the command: `npx create-next-app@latest my-detailed-blog`
    * When prompted:
        * Select `Yes` for TypeScript.
        * Select `Yes` for ESLint.
        * Select `Yes` for Tailwind CSS.
        * Select `Yes` for `src/` directory.
        * Select `Yes` for App Router.
        * Select `No` for customizing the default import alias.
    * Navigate into the project: `cd my-detailed-blog`
    * Verify the structure (show key folders like `src/app`, `src/lib`, `public`, `tailwind.config.ts`).
2. **Install Dependencies:**
    * Run: `npm install gray-matter react-markdown date-fns`
    * Explain purpose: `gray-matter` for frontmatter, `react-markdown` for rendering, `date-fns` for date formatting.
3. **Define Blog Post Structure & Content:**
    * Create a directory: `mkdir content` at the project root.
    * Inside `content`, create `posts` directory: `mkdir content/posts`
    * Create an example post file: `content/posts/first-post.md`
    * Add content with frontmatter:
        ```markdown
        ---
        title: "My First Detailed Post"
        date: "2025-05-01"
        author: "AI Assistant"
        excerpt: "This is a short summary of the post..."
        ---

        ## Introduction

        This is the main content of the blog post, written in Markdown.

        * You can use lists.
        * And other Markdown features.

        ```python
        print("Hello, World!")
        ```
        ```
4. **Implement Post Utility Functions (`src/lib/posts.ts`):**
    * Create the file: `src/lib/posts.ts`
    * **Prompt for AI Tool (Cursor):** "In `src/lib/posts.ts`, generate the following TypeScript functions:
        1. `getPostsDirectory()`: Returns the absolute path to the `content/posts` directory.
        2. `getAllPostIds()`: Reads all filenames in the posts directory and returns an array of objects like `{{ params: {{ id: 'first-post' }} }}` (without the `.md` extension), suitable for Next.js `generateStaticParams`.
        3. `getPostData(id: string)`: Takes a post ID (filename without extension), reads the corresponding `.md` file, parses the frontmatter using `gray-matter`, converts the markdown content to HTML using `react-markdown` (or keep as markdown string if rendering client-side), formats the date using `date-fns`, and returns an object containing `id`, `title`, `date`, `author`, `excerpt`, and `contentHtml` (or `contentMarkdown`). Handle potential file read errors.
        4. `getSortedPostsData()`: Reads all posts, parses frontmatter, and returns an array of post metadata (`id`, `title`, `date`, `author`, `excerpt`) sorted by date in descending order."
    * Add necessary imports: `fs`, `path`, `gray-matter`, `date-fns`.
5. **Create Blog Index Page (`src/app/page.tsx`):**
    * Clear the default content of `src/app/page.tsx`.
    * **Prompt for AI Tool (Cursor/Windsurf):** "Update `src/app/page.tsx` to be an async Server Component. Import `getSortedPostsData` from `src/lib/posts`. Call `getSortedPostsData` to fetch the sorted post metadata. Render a heading 'Blog'. Below the heading, map over the sorted posts data and render a list (`<ul>`). Each list item (`<li>`) should contain:
        * A link (`<Link href={{`/posts/${{post.id}}`}}>`) displaying the `post.title`.
        * A small text element displaying the formatted `post.date` and `post.author`.
        * The `post.excerpt` below the title/date.
        * Use Tailwind CSS classes for styling (e.g., `text-2xl font-bold`, `text-gray-600`, `mt-2`). Import `Link` from `next/link`."
6. **Create Dynamic Post Page (`src/app/posts/[id]/page.tsx`):**
    * Create the directory structure: `src/app/posts/[id]`
    * Create the file: `src/app/posts/[id]/page.tsx`
    * **Prompt for AI Tool (Cursor/Windsurf):** "Create `src/app/posts/[id]/page.tsx`. Implement the following:
        1. Import `getAllPostIds` and `getPostData` from `src/lib/posts`.
        2. Implement `generateStaticParams` using `getAllPostIds` to pre-render all post pages at build time.
        3. Define the page component as an async function `Post({{ params }}: {{ params: {{ id: string }} }})`.
        4. Inside the component, call `getPostData(params.id)` to fetch the specific post data.
        5. Render the post content:
            * An `<h1>` with the `post.title`.
            * A `<div>` with the `post.date` and `post.author`.
            * A `<div>` where the `post.contentHtml` (or markdown content rendered with `react-markdown`) is displayed. Use Tailwind's `prose` classes for styling the markdown content if applicable.
        6. Add basic error handling if post data is not found."
7. ... (Continue with styling refinements, layout component, potential API routes, etc. - BE EXTREMELY DETAILED)

Now, generate the **highly detailed, multi-page** task list based on the user's request below. Ensure the output is in **{language}** and includes **example code snippets**.
"""

output_parser = StrOutputParser()
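Because the template above is later rendered with plain `str.format` rather than a LangChain prompt template, only `{language}` is substituted; the doubled braces such as `{{ params }}` collapse into single literal braces. A minimal check of that behavior (the example string here is illustrative, not taken from the module):

```python
# str.format fills {language} and turns {{ / }} into literal braces.
template = "Respond in {language}. Return objects shaped like {{ params: {{ id: 'first-post' }} }}."
print(template.format(language="English"))
# -> Respond in English. Return objects shaped like { params: { id: 'first-post' } }.
```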
# --- API Key & Model Management ---

def validate_gemini_key(api_key):
    """Validates a Gemini API key by making a minimal test invocation."""
    if not api_key:
        return False, "API anahtarı boş."
    try:
        llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest", google_api_key=api_key, client_options={"api_endpoint": "generativelanguage.googleapis.com"})
        llm.invoke([HumanMessage(content="Hi")])
        logging.info("Gemini API key validated successfully.")
        return True, "Başarıyla Doğrulandı"
    except Exception as e:
        logging.error(f"Gemini API key validation failed: {e}")
        if "API_KEY_INVALID" in str(e):
            return False, "Doğrulama Başarısız: Geçersiz API Anahtarı."
        if "models/gemini-pro" in str(e) or "is not found for API version" in str(e):
            return False, "Doğrulama Başarısız: Eski model desteklenmiyor olabilir. Geçerli bir model seçin."
        return False, f"Doğrulama Başarısız: {e}"

def validate_openrouter_key(api_key):
    """Validates an OpenRouter API key by calling the auth endpoint."""
    if not api_key:
        return False, "API anahtarı boş."
    try:
        response = requests.get(
            "https://openrouter.ai/api/v1/auth/key",
            headers={"Authorization": f"Bearer {api_key}"}
        )
        response.raise_for_status()
        logging.info("OpenRouter API key validated successfully.")
        return True, "Başarıyla Doğrulandı"
    except requests.exceptions.RequestException as e:
        logging.error(f"OpenRouter API key validation failed: {e}")
        status_code = e.response.status_code if e.response is not None else "N/A"
        if status_code == 401:
            return False, f"Doğrulama Başarısız (HTTP {status_code}): Geçersiz API Anahtarı."
        return False, f"Doğrulama Başarısız (HTTP {status_code}): {e}"
    except Exception as e:
        logging.error(f"OpenRouter API key validation failed with unexpected error: {e}")
        return False, f"Doğrulama Başarısız: {e}"

def save_api_key(api_key, api_type):
    """Validates and saves the API key to the state."""
    if api_type == "gemini":
        is_valid, status_message = validate_gemini_key(api_key)
    elif api_type == "openrouter":
        is_valid, status_message = validate_openrouter_key(api_key)
    else:
        return None, "Geçersiz API Tipi"

    if is_valid:
        return api_key, status_message
    else:
        return None, status_message

def delete_api_key(api_type):
    """Deletes the API key from the state."""
    logging.info(f"Deleting {api_type} API key from state.")
    return None, "Kaydedilmedi", ""
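For reference, a hedged usage sketch of the key-management helpers above. The key value is a placeholder and the Gradio wiring (a `gr.State` for the key plus a status textbox) is an assumption about how the app uses these return values, not something stated in this file:

```python
# Hypothetical call: save_api_key runs the matching validator and returns
# (key_or_None, status_message), suitable for a gr.State output and a status label.
key_state, status = save_api_key("sk-or-xxxxxxxx", "openrouter")  # placeholder key
print(key_state is not None, status)

# delete_api_key resets three outputs: the stored key, the status label ("Kaydedilmedi",
# i.e. "not saved"), and the key textbox value.
key_state, status, textbox_value = delete_api_key("openrouter")
```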
def list_available_gemini_models():
    """Returns the updated list of predefined Gemini models."""
    logging.info("Returning updated predefined Gemini models list.")
    return VALID_GEMINI_MODELS

def list_available_openrouter_models():
    """Lists available models from OpenRouter API and returns a list."""
    try:
        logging.info("Fetching OpenRouter models...")
        response = requests.get("https://openrouter.ai/api/v1/models")
        response.raise_for_status()
        models_data = response.json().get("data", [])
        preferred_vendors = ["google", "anthropic", "openai", "mistralai", "meta-llama"]
        models = sorted(
            [m["id"] for m in models_data],
            key=lambda x: ([p not in x.lower() for p in preferred_vendors], x)
        )
        logging.info(f"Fetched {len(models)} OpenRouter models.")
        if not models:
            models = ["google/gemini-pro"]  # Fallback
        return models
    except requests.exceptions.RequestException as e:
        logging.error(f"Error fetching OpenRouter models: {e}")
        gr.Warning(f"OpenRouter modelleri listelenemedi: {e}")
        return ["google/gemini-pro"]  # Fallback list
    except Exception as e:
        logging.error(f"Error processing OpenRouter models: {e}")
        gr.Warning(f"OpenRouter modelleri işlenirken hata: {e}")
        return ["google/gemini-pro"]  # Fallback list
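The sort key in `list_available_openrouter_models` can look opaque: for each preferred vendor it records `p not in x.lower()`, so an id containing an earlier-listed vendor yields a `False` sooner, compares smaller, and sorts to the front, with ties broken alphabetically. A small standalone illustration (the model ids are examples, not fetched data):

```python
preferred_vendors = ["google", "anthropic", "openai", "mistralai", "meta-llama"]
ids = ["zzz/other-model", "openai/gpt-4o", "google/gemini-pro"]
print(sorted(ids, key=lambda x: ([p not in x.lower() for p in preferred_vendors], x)))
# -> ['google/gemini-pro', 'openai/gpt-4o', 'zzz/other-model']
```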
# --- Task Generation Logic ---

def get_llm(gemini_key, openrouter_key, gemini_model_name, openrouter_model_name):
    """Initializes and returns the appropriate LLM based on provided keys and model names."""
    llm = None
    api_used = None
    last_error = None

    if gemini_key and gemini_model_name in VALID_GEMINI_MODELS:
        try:
            llm = ChatGoogleGenerativeAI(model=gemini_model_name, google_api_key=gemini_key, convert_system_message_to_human=True)
            logging.info(f"Attempting to use Gemini LLM ({gemini_model_name}).")
            llm.invoke([HumanMessage(content="Hi")])
            logging.info(f"Successfully initialized Gemini LLM ({gemini_model_name}).")
            api_used = "Gemini"
            return llm, api_used
        except Exception as e:
            logging.error(f"Failed to initialize Gemini ({gemini_model_name}): {e}")
            last_error = e
            llm = None
    elif gemini_key:
        logging.warning(f"Selected Gemini model {gemini_model_name} is not in the valid list. Skipping Gemini.")
        last_error = ValueError(f"Geçersiz Gemini modeli seçildi: {gemini_model_name}")

    if openrouter_key and llm is None:
        try:
            if not openrouter_model_name:
                raise ValueError("OpenRouter modeli seçilmedi.")
            llm = ChatOpenAI(
                model=openrouter_model_name,
                openai_api_key=openrouter_key,
                openai_api_base="https://openrouter.ai/api/v1",
            )
            logging.info(f"Attempting to use OpenRouter LLM ({openrouter_model_name}).")
            llm.invoke([HumanMessage(content="Hi")])
            logging.info(f"Successfully initialized OpenRouter LLM ({openrouter_model_name}).")
            api_used = "OpenRouter"
            return llm, api_used
        except Exception as e:
            logging.error(f"Failed to initialize OpenRouter ({openrouter_model_name}): {e}")
            if last_error is None or not gemini_key:
                last_error = e

    if llm is None:
        if not gemini_key and not openrouter_key:
            raise gr.Error("API anahtarı sağlanmadı.")
        elif last_error:
            if isinstance(last_error, ValueError) and "OpenRouter modeli seçilmedi" in str(last_error):
                raise gr.Error(str(last_error))
            if "API_KEY_INVALID" in str(last_error) or "401" in str(last_error):
                raise gr.Error("LLM başlatılamadı: Geçersiz API Anahtarı.")
            raise gr.Error(f"LLM başlatılamadı. Son hata: {last_error}")
        else:
            raise gr.Error("LLM başlatılırken bilinmeyen bir hata oluştu.")
    else:
        return llm, api_used
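A minimal sketch of calling `get_llm`, assuming the keys arrive via environment variables (the variable names are my assumption, not part of this module). Gemini is tried first when its key is present and the model name is in `VALID_GEMINI_MODELS`; otherwise the OpenRouter settings are used, and `gr.Error` is raised if neither backend can be initialized:

```python
import os

llm, api_used = get_llm(
    gemini_key=os.getenv("GEMINI_API_KEY"),          # assumed env var; may be None
    openrouter_key=os.getenv("OPENROUTER_API_KEY"),  # assumed env var; may be None
    gemini_model_name="gemini-1.5-pro-latest",
    openrouter_model_name="google/gemini-pro",       # the module's own fallback id
)
print(f"Initialized via {api_used}")
```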
def generate_tasks_for_idea(idea, gemini_key, openrouter_key, target_tool, gemini_model_name, openrouter_model_name, language):
    """Generates tasks based on an application idea using Langchain and an LLM, in the specified language."""
    logging.info(f"Generating tasks for idea: {idea[:50]}... , Target Tool: {target_tool}, Language: {language}")

    if not idea:
        yield "Hata: Lütfen bir uygulama fikri girin."
        return

    if not gemini_key and not openrouter_key:
        yield "Hata: Lütfen en az bir API anahtarı (Gemini veya OpenRouter) girin."
        return

    try:
        llm, api_used = get_llm(gemini_key, openrouter_key, gemini_model_name, openrouter_model_name)
        logging.info(f"Successfully initialized LLM using {api_used}.")

        # --- Alternative Prompt Approach ---
        # 1. Format the system prompt with the language
        formatted_system_prompt = system_prompt_base.format(language=language)

        # 2. Create the human message content
        human_message_content = f"Application Idea: {idea}\nTarget AI Tool: {target_tool}"

        # 3. Create the messages list manually
        messages = [
            SystemMessage(content=formatted_system_prompt),
            HumanMessage(content=human_message_content)
        ]
        # --- End of Alternative Approach ---

        logging.info("Invoking LLM directly with messages list...")
        # Invoke the LLM directly with the list of messages
        response_stream = llm.stream(messages)

        full_response = ""
        yield "Detaylı görevler oluşturuluyor, lütfen bekleyin...\n\n"  # Initial message
        for chunk in response_stream:
            # Access the content attribute of the AIMessage chunk
            if hasattr(chunk, 'content'):
                full_response += chunk.content
                yield full_response
            else:
                # Handle potential non-AIMessage chunks if necessary (e.g., errors)
                logging.warning(f"Received unexpected chunk type: {type(chunk)}")

        logging.info("Detailed task generation complete.")

    except gr.Error as e:
        logging.error(f"Gradio Error during task generation: {e}")
        yield f"Hata: {e}"
    except Exception as e:
        # Check specifically for the template formatting error again (shouldn't happen now)
        if "unexpected '{' in field name" in str(e):
            logging.error(f"Template formatting error persisted unexpectedly: {e}", exc_info=True)
            yield f"Hata: Prompt şablonu hatası devam ediyor. Lütfen geliştiriciye bildirin. Hata: {e}"
        else:
            logging.error(f"Error during task generation: {e}", exc_info=True)
            yield f"Görev oluşturulurken bir hata oluştu: {e}. Lütfen API anahtarlarınızı, model seçiminizi ve ağ bağlantınızı kontrol edin."
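Because `generate_tasks_for_idea` is a generator that yields the accumulated text after every streamed chunk, a caller (typically a Gradio event handler bound to a Markdown or Textbox output) simply iterates it. A rough consumption sketch, with a placeholder idea and an assumed environment variable for the key:

```python
import os

final_text = ""
for partial in generate_tasks_for_idea(
    idea="A markdown blog built with Next.js",  # placeholder idea
    gemini_key=os.getenv("GEMINI_API_KEY"),     # assumed env var
    openrouter_key=None,
    target_tool="Cursor",
    gemini_model_name="gemini-1.5-pro-latest",
    openrouter_model_name=None,
    language="English",
):
    final_text = partial  # each yield is the full text generated so far
print(final_text)
```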
# --- Task Downloading Logic ---

def format_tasks_for_download(tasks_text, file_format):
    """Formats the generated tasks text into the specified file format and returns the file path."""
    if not tasks_text or tasks_text.startswith("Detaylı görevler oluşturuluyor"):
        gr.Warning("İndirilecek geçerli görev bulunmuyor.")
        return None

    try:
        suffix = ".txt"
        content = tasks_text
        if file_format == "Markdown":
            suffix = ".md"
            content = tasks_text  # Assume LLM output is already markdown
        elif file_format == "JSON":
            suffix = ".json"
            tasks_list = []
            current_task_lines = []
            task_pattern = re.compile(r"^\s*\d+\.\s+")

            for line in tasks_text.strip().split("\n"):
                line_strip = line.strip()
                if task_pattern.match(line_strip):
                    if current_task_lines:
                        tasks_list.append("\n".join(current_task_lines).strip())
                    current_task_lines = [task_pattern.sub("", line_strip)]
                elif line_strip or current_task_lines:
                    current_task_lines.append(line)

            if current_task_lines:
                tasks_list.append("\n".join(current_task_lines).strip())

            content = json.dumps({"tasks": tasks_list}, indent=4, ensure_ascii=False)

        with tempfile.NamedTemporaryFile(mode="w", suffix=suffix, delete=False, encoding="utf-8") as temp_file:
            temp_file.write(content)
            file_path = temp_file.name
        logging.info(f"Tasks formatted as {file_format} and saved to temporary file: {file_path}")
        return file_path

    except Exception as e:
        logging.error(f"Error formatting tasks for download: {e}")
        gr.Error(f"Görevler indirilirken hata oluştu: {e}")
        return None
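To make the JSON branch concrete, here is a small illustration of how the numbered-step regex splits a plan into top-level tasks while keeping sub-bullets attached to their parent step (the sample text is invented for the illustration):

```python
sample = (
    "1. Project Setup\n"
    "   * Run `npx create-next-app@latest my-detailed-blog`\n"
    "2. Install Dependencies"
)
path = format_tasks_for_download(sample, "JSON")
# The temporary .json file holds (pretty-printed with indent=4):
# {"tasks": ["Project Setup\n   * Run `npx create-next-app@latest my-detailed-blog`",
#            "Install Dependencies"]}
print(path)
```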