import gradio as gr
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Load the DAMO text-to-video model from the ModelScope hub.
# Note: the ModelScope model ID is 'damo/text-to-video-synthesis'
# ('damo-vilab' is the corresponding Hugging Face organization, not a ModelScope namespace).
video_model = pipeline(Tasks.text_to_video_synthesis, model='damo/text-to-video-synthesis')


def generate_pokemon_video(prompt, style, duration, image):
    """Generate a Pokémon-themed video. The prompt is augmented with the chosen style."""
    # Augment the prompt with the chosen style
    full_prompt = f"{prompt}, Pokémon style: {style}"

    # Prepare the input dictionary. Not every text-to-video pipeline honors extra
    # keys such as 'duration' or 'image'; unsupported keys may simply be ignored.
    inputs = {'text': full_prompt, 'duration': duration}
    if image:
        inputs['image'] = image  # Include the uploaded image if one was provided

    # Generate the video using the ModelScope pipeline
    result = video_model(inputs)
    return result["output_video"]


# Build the Gradio UI
with gr.Blocks() as iface:
    gr.Markdown("# 🎥 PokeVidGen - AI Pokémon Shorts Generator")
    gr.Markdown(
        "Enter a prompt, choose a Pokémon style, adjust the video duration, and "
        "(optionally) upload an image to generate a Pokémon-themed video!"
    )

    with gr.Row():
        prompt = gr.Textbox(label="Enter Pokémon Scene", placeholder="Pikachu using Thunderbolt in a forest")
        style = gr.Dropdown(["Anime", "Classic Pokémon", "Modern 3D"], label="Pokémon Style", value="Anime")

    duration = gr.Slider(1, 10, step=1, label="Video Duration (Seconds)", value=5)
    image = gr.Image(label="Upload an Image (Optional)", type="filepath")

    generate_btn = gr.Button("Generate Pokémon Video")
    output_video = gr.Video(label="Generated Pokémon Video")

    generate_btn.click(generate_pokemon_video, inputs=[prompt, style, duration, image], outputs=output_video)

iface.launch()
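
# ---------------------------------------------------------------------------
# Usage sketch (assumptions: this script is saved as app.py and the standard
# PyPI distributions 'gradio' and 'modelscope' are used; the text-to-video
# pipeline may download model weights and extra dependencies on first run):
#
#   pip install gradio modelscope
#   python app.py
#
# Gradio serves the UI locally (by default at http://127.0.0.1:7860); passing
# share=True to iface.launch() would expose a temporary public link.
# ---------------------------------------------------------------------------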