Alexey Mametyev committed on
Commit
7dcf767
·
unverified ·
2 Parent(s): 56c6de1 af82021

Merge pull request #2 from freQuensy23-coder/codex/удалить-main.py-и-заменить-на-demo.py

Browse files
Files changed (1) hide show
  1. main.py +0 -157
main.py DELETED
@@ -1,157 +0,0 @@
1
- import asyncio
2
- import time
3
- from google import genai
4
- import os
5
- from dotenv import load_dotenv
6
- from google.genai.chats import Chat
7
- from manim_video_generator.video_executor import VideoExecutor
8
- from prompts import SYSTEM_PROMPT_SCENARIO_GENERATOR, SYSTEM_PROMPT_CODEGEN, REVIEW_PROMPT
9
- from google.genai.types import (
10
- GenerateContentResponse,
11
- ThinkingConfig,
12
- GenerateContentConfig,
13
- UploadFileConfig,
14
- )
15
- from pathlib import Path
16
- import traceback
17
-
18
- load_dotenv()
19
-
20
-
21
-
22
- async def main():
23
- client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
24
- video_executor = VideoExecutor()
25
-
26
- chat: Chat = client.chats.create(model="gemini-2.5-flash-preview-05-20")
27
-
28
- user_task = input("Enter your task: ")
29
- assert (
30
- len(user_task) > 0 and len(user_task) < 10000
31
- ), "Task must be between 1 and 10000 characters"
32
-
33
- user_input = SYSTEM_PROMPT_SCENARIO_GENERATOR + "\n\n" + user_task
34
- # Generate scenario
35
- for iter in range(1000):
36
- answer = ""
37
- chunk: GenerateContentResponse
38
- for chunk in chat.send_message_stream(
39
- user_input,
40
- config=GenerateContentConfig(
41
- thinking_config=ThinkingConfig(
42
- include_thoughts=True,
43
- ),
44
- ),
45
- ):
46
- print()
47
- if chunk.candidates:
48
- candidate = chunk.candidates[0]
49
- if candidate.content and candidate.content.parts:
50
- for part in candidate.content.parts:
51
- if part.thought:
52
- print('💭: ', part.text, end="", flush=True)
53
- elif part.text:
54
- print(part.text, end="", flush=True)
55
- answer += part.text
56
- user_input = input("Answer answer to scenario manager or continue (c)?")
57
- if user_input.lower() in ("c", "continue", 'с'):
58
- print("Scenario created")
59
- scenario = answer
60
- break
61
-
62
- # Generate code
63
- user_input = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
64
- print('Generating code...')
65
- for iter in range(1000):
66
- answer = ""
67
- chunk: GenerateContentResponse
68
- for chunk in chat.send_message_stream(
69
- user_input,
70
- config=GenerateContentConfig(
71
- thinking_config=ThinkingConfig(
72
- include_thoughts=True,
73
- ),
74
- ),
75
- ):
76
- print()
77
- if chunk.candidates:
78
- candidate = chunk.candidates[0]
79
- if candidate.content and candidate.content.parts:
80
- for part in candidate.content.parts:
81
- if part.thought:
82
- print('💭: ', part.text, end="", flush=True)
83
- elif part.text:
84
- print(part.text, end="", flush=True)
85
- answer += part.text
86
- try:
87
- code = answer.split("```python")[1].split("```")[0]
88
- except Exception as e:
89
- print(f"Error: {e}")
90
- user_input = f"Error, your answer is not valid formated manim code."
91
- continue
92
-
93
-
94
- try:
95
- video_path: Path = video_executor.execute_manim_code(code)
96
- print(f"Video generated at {video_path}")
97
- except Exception as e:
98
- print(f"Error: {e}")
99
- traceback_str = traceback.format_exc()
100
- user_input = f"Error, your code is not valid: {e}. Please fix it. Traceback: {traceback_str}"
101
- continue
102
-
103
- myfile = client.files.upload(file=video_path.absolute(),
104
- config=UploadFileConfig(
105
- display_name=video_path.name
106
- ))
107
- assert myfile.name, "File name is not set"
108
- assert myfile.state, "File state is not set"
109
-
110
- print('Uploading video file to google genai...')
111
- while myfile.state.name == "PROCESSING":
112
- print('.', end='', flush=True)
113
- time.sleep(10)
114
- myfile = client.files.get(name=myfile.name)
115
- print(f"File uploaded at {myfile.name}")
116
-
117
- if myfile.state.name == "FAILED":
118
- raise ValueError(myfile.state.name)
119
-
120
- print(f"File uploaded at {myfile.name}")
121
-
122
-
123
-
124
- for chunk in chat.send_message_stream(
125
- [myfile, REVIEW_PROMPT],
126
- config=GenerateContentConfig(
127
- thinking_config=ThinkingConfig(
128
- include_thoughts=True,
129
- ),
130
- ),
131
- ):
132
- if chunk.candidates:
133
- candidate = chunk.candidates[0]
134
- if candidate.content and candidate.content.parts:
135
- for part in candidate.content.parts:
136
- if part.text:
137
- if part.thought:
138
- print('💭: ', part.text, end="", flush=True)
139
- else:
140
- print(part.text, end="", flush=True)
141
- answer += part.text
142
- if "no issues found" in answer.lower():
143
- print("No issues found")
144
- break
145
- else:
146
- print("Issues found")
147
- user_prompt = input("Prompt for fixing issues (or n to exit): ")
148
- if user_prompt.lower() == 'n':
149
- break
150
- else:
151
- user_input = f"Fix this problems please."
152
- if user_prompt.strip():
153
- user_input += f" TIP: {user_prompt}"
154
-
155
-
156
# Script entry point: drive the async pipeline with a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())