Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,44 +1,203 @@
- import os
- import requests
  import gradio as gr

- [old lines 5–23 removed; their content is not recoverable from this view, apart from a stray `#` and a closing `}`]
  try:
-     response = [old lines 25–40 removed; a multi-line call whose arguments are not recoverable, closed by `)`]

  # Launch the app
- [old line 43 removed; not recoverable]
- iface.launch()
  import gradio as gr
+ import os
+ from huggingface_hub import HfApi
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ # Hugging Face token and space info
+ # (the token must come from a Space secret; a hardcoded `hf_...` token in a
+ # public repo is compromised the moment it is pushed and should be revoked)
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+ HF_SPACE = "Hardik5456/Wan2.1playground"
+
+ # Initialize Hugging Face API
+ api = HfApi(token=HF_TOKEN)
+
+ # We'll use Mistral-7B as it's a good balance of quality and performance
+ MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"
+
+ # Load configuration content
+ with open("spectral_satya_gpt_configuration.md", "r") as f:
+     config_content = f.read()
+
+ with open("spectral_satya_book_integration.md", "r") as f:
+     book_integration = f.read()
+
+ with open("spectral_satya_prompt_examples.md", "r") as f:
+     prompt_examples = f.read()
+
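These three reads run at import time, so a missing or renamed .md file crashes the Space before the UI ever builds — one of the first things to check when a Space shows "Runtime error" with no interface. A defensive sketch (the helper name `read_if_exists` is ours, not part of the commit):

    def read_if_exists(path):
        """Return the file's text, or '' if it isn't in the repo."""
        try:
            with open(path, "r") as f:
                return f.read()
        except FileNotFoundError:
            print(f"Warning: {path} not found; continuing without it.")
            return ""

    config_content = read_if_exists("spectral_satya_gpt_configuration.md")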
+ # System prompt that incorporates the configuration
+ SYSTEM_PROMPT = f"""
+ You are Spectral Satya, a specialized AI assistant focused on crafting cinematic reality through expert prompt engineering for AI video generation.

+ {config_content}
+
+ Your responses should follow the principles and techniques outlined in "The Prompt Engineer's Codex: Crafting Cinematic Reality with AI Video".
+ """
+
+ # Initialize tokenizer and model
+ try:
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+     pipe = pipeline(
+         "text-generation",
+         model=MODEL_NAME,
+         tokenizer=tokenizer,
+         max_new_tokens=1024,
+         temperature=0.7,
+         top_p=0.95,
+         repetition_penalty=1.15
+     )
+ except Exception as e:
+     print(f"Error loading model: {e}")
+     # Fallback to a smaller model if the main one fails
+     MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+     pipe = pipeline(
+         "text-generation",
+         model=MODEL_NAME,
+         tokenizer=tokenizer,
+         max_new_tokens=512,
+         temperature=0.7,
+         top_p=0.95,
+         repetition_penalty=1.15
+     )
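A small generation-config caveat: transformers only applies `temperature`, `top_p`, and similar sampling knobs when sampling is enabled, so as written the pipeline warns and falls back to greedy decoding. The intended behavior needs one extra flag; a sketch with the commit's own hyperparameters:

    pipe = pipeline(
        "text-generation",
        model=MODEL_NAME,
        tokenizer=tokenizer,
        max_new_tokens=1024,
        do_sample=True,  # without this, temperature/top_p are ignored
        temperature=0.7,
        top_p=0.95,
        repetition_penalty=1.15,
    )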
+
+ def format_prompt(user_input, chat_history):
+     """Format the prompt for the model with chat history."""
+     messages = []
+
+     # Add system prompt
+     messages.append({"role": "system", "content": SYSTEM_PROMPT})
+
+     # Add chat history
+     for user_msg, assistant_msg in chat_history:
+         messages.append({"role": "user", "content": user_msg})
+         messages.append({"role": "assistant", "content": assistant_msg})
+
+     # Add current user input
+     messages.append({"role": "user", "content": user_input})
+
+     # Format for the model
+     formatted_prompt = ""
+     for message in messages:
+         if message["role"] == "system":
+             formatted_prompt += f"<s>[INST] <<SYS>>\n{message['content']}\n<</SYS>>\n\n"
+         elif message["role"] == "user":
+             if formatted_prompt:
+                 formatted_prompt += f"{message['content']} [/INST]"
+             else:
+                 formatted_prompt += f"<s>[INST] {message['content']} [/INST]"
+         elif message["role"] == "assistant":
+             formatted_prompt += f" {message['content']} </s><s>[INST] "
+
+     return formatted_prompt
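One template mismatch worth flagging: the `<s>[INST] <<SYS>> ... <</SYS>>` framing above is Llama-2's chat format, while Mistral-7B-Instruct was trained on bare `[INST] ... [/INST]` turns with no system block. Rather than hand-rolling either format, the tokenizer can emit the correct one itself. A sketch (it folds the system prompt into the first user turn because Mistral's template, at least in v0.2, does not accept a separate system role):

    def format_prompt(user_input, chat_history):
        """Build the prompt via the tokenizer's built-in chat template."""
        messages = []
        for user_msg, assistant_msg in chat_history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": user_input})
        # Prepend the system prompt to the first user turn.
        messages[0]["content"] = f"{SYSTEM_PROMPT}\n\n{messages[0]['content']}"
        return tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )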
+
+ def generate_response(user_input, chat_history):
+     """Generate a response using the model."""
+     prompt = format_prompt(user_input, chat_history)
+
      try:
+         response = pipe(prompt)[0]['generated_text']
+         # Extract only the new content (the model's response)
+         response = response.split('[/INST]')[-1].strip()
+         if '</s>' in response:
+             response = response.split('</s>')[0].strip()
+
+         # Clean up any remaining tags
+         response = response.replace('<s>', '').replace('</s>', '').replace('[INST]', '').replace('[/INST]', '')
+
+         return response
+     except Exception as e:
+         return f"I apologize, but I encountered an error: {str(e)}. Please try again with a different query."
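The tag surgery above can be sidestepped: text-generation pipelines accept `return_full_text=False`, which returns only the completion rather than prompt plus completion. A sketch of the equivalent extraction:

    outputs = pipe(prompt, return_full_text=False)
    response = outputs[0]["generated_text"].strip()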
+
+ def spectral_satya_chat(user_input, chat_history):
+     """Main chat function for the Gradio interface."""
+     response = generate_response(user_input, chat_history)
+     chat_history.append((user_input, response))
+     return "", chat_history
+
+ def generate_cinematic_prompt(scene_description, subject_details, camera_preferences, lighting_mood, platform):
+     """Generate a complete cinematic prompt based on user inputs."""
+     prompt = f"""
+ Please create a detailed cinematic prompt for the following scene:
+
+ Scene Description: {scene_description}
+ Subject Details: {subject_details}
+ Camera Preferences: {camera_preferences}
+ Lighting and Mood: {lighting_mood}
+ Target Platform: {platform}
+
+ I need both a positive prompt and a negative prompt that follows the principles from The Prompt Engineer's Codex.
+ """
+
+     chat_history = []
+     response = generate_response(prompt, chat_history)
+
+     return response
+
+ def show_examples():
+     """Return example prompts from the configuration."""
+     return prompt_examples
+
+ # Define the Gradio interface
+ with gr.Blocks(title="Spectral Satya - Cinematic Prompt Engineer") as demo:
+     gr.Markdown("# 🎬 Spectral Satya - Cinematic Prompt Engineer")
+     gr.Markdown("### Your specialized AI assistant for crafting cinematic reality through expert prompt engineering")
+
+     with gr.Tab("Chat Interface"):
+         chatbot = gr.Chatbot(height=500)
+         msg = gr.Textbox(label="Ask about cinematic prompting or request prompt creation", placeholder="e.g., 'Create a cinematic prompt for a detective in a rainy alleyway'")
+         clear = gr.Button("Clear")
+
+         msg.submit(spectral_satya_chat, [msg, chatbot], [msg, chatbot])
+         clear.click(lambda: None, None, chatbot, queue=False)
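A compatibility note on this wiring: the `(user, assistant)` tuple history is Gradio's classic `gr.Chatbot` format; newer Gradio releases deprecate it in favor of `type="messages"` with role/content dicts, so an unpinned `gradio` dependency can break this tab on a rebuild. A sketch of the message-style equivalent, reusing the commit's function names (assuming a Gradio version that supports `type="messages"`):

    chatbot = gr.Chatbot(height=500, type="messages")

    def spectral_satya_chat(user_input, history):
        # history arrives as alternating {"role", "content"} dicts; rebuild
        # the (user, assistant) pairs that generate_response() expects.
        pairs = [(history[i]["content"], history[i + 1]["content"])
                 for i in range(0, len(history) - 1, 2)]
        response = generate_response(user_input, pairs)
        history += [{"role": "user", "content": user_input},
                    {"role": "assistant", "content": response}]
        return "", history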
+
+     with gr.Tab("Prompt Generator"):
+         with gr.Row():
+             with gr.Column():
+                 scene_description = gr.Textbox(label="Scene Description", placeholder="Describe the overall scene you want to create", lines=3)
+                 subject_details = gr.Textbox(label="Subject Details", placeholder="Describe the main character/subject in detail", lines=2)
+                 camera_preferences = gr.Textbox(label="Camera Preferences", placeholder="Any specific shot types, angles, or movements", lines=2)
+                 lighting_mood = gr.Textbox(label="Lighting and Mood", placeholder="Describe the lighting conditions and emotional tone", lines=2)
+                 platform = gr.Dropdown(
+                     label="Target Platform",
+                     choices=["RunwayML", "Pika", "Kling", "Haiper", "Vidu", "Veo", "PixVerse", "Any/Universal"],
+                     value="Any/Universal"
+                 )
+                 generate_btn = gr.Button("Generate Cinematic Prompt")
+
+             with gr.Column():
+                 output = gr.Textbox(label="Generated Prompt", lines=15)
+
+         generate_btn.click(
+             generate_cinematic_prompt,
+             [scene_description, subject_details, camera_preferences, lighting_mood, platform],
+             output
+         )
+
+     with gr.Tab("Example Prompts"):
+         examples_output = gr.Markdown()
+         show_examples_btn = gr.Button("Show Example Prompts")
+         show_examples_btn.click(show_examples, None, examples_output)
+
+     with gr.Tab("About"):
+         gr.Markdown("""
+         ## About Spectral Satya
+
+         Spectral Satya is a specialized AI assistant focused on crafting cinematic reality through expert prompt engineering for AI video generation. Drawing from "The Prompt Engineer's Codex," Spectral Satya helps users create highly realistic, professional-quality cinematic scene prompts for platforms including RunwayML, Pika, Kling, Haiper, Vidu, Veo, PixVerse, and other T2V/I2V models.
+
+         ### Core Principles
+
+         - **Realism Above All**: Always prioritizes photorealistic, cinematic quality
+         - **Specificity is King**: Eliminates vagueness in all prompts
+         - **Show, Don't Tell**: Uses visual language that paints clear pictures
+         - **Defensive Prompting**: Includes robust negative prompts to ward off unwanted styles
+
+         ### How to Use
+
+         1. **Chat Interface**: Ask questions about cinematic prompting or request specific prompts
+         2. **Prompt Generator**: Fill in the form fields to generate structured cinematic prompts
+         3. **Example Prompts**: Browse example prompts across different scenarios
+
+         Created by Hardik Kumawat
+         """)

  # Launch the app
+ demo.launch()
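Finally, the "Runtime error" badge at the top of this page is consistent with the most common failure mode for this kind of commit: `pipeline(...)` downloads and loads the full Mistral-7B weights into the Space's own memory, which free CPU hardware generally cannot hold, and the TinyLlama fallback only helps when the failure surfaces as a clean exception rather than an out-of-memory kill. If upgrading the Space hardware is not an option, one alternative is to keep generation server-side through the hosted inference service; a sketch using `huggingface_hub.InferenceClient` (assuming the model is served there and `HF_TOKEN` is set as a Space secret):

    from huggingface_hub import InferenceClient

    client = InferenceClient(model=MODEL_NAME, token=HF_TOKEN)

    def generate_response(user_input, chat_history):
        """Same interface as before, but nothing is loaded into the Space."""
        prompt = format_prompt(user_input, chat_history)
        return client.text_generation(
            prompt,
            max_new_tokens=1024,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.15,
        ).strip()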