import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
# The Kvikontent/midjourney-v6 checkpoint is assumed to be a Stable Diffusion-style
# text-to-image model loadable with diffusers; verify against the model card on the Hub.
model_id = "Kvikontent/midjourney-v6"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline once at startup rather than on every request.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

def generate_image(prompt):
    # The pipeline returns PIL images directly, so no manual tensor conversion is needed.
    image = pipe(prompt).images[0]
    return image
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="image",
    title="Text to Image Generator",
    description="Type some text and generate an image using the Kvikontent/midjourney-v6 model.",
)
# Running the application
iface.launch()