Ryouko65777 committed
Commit 07923f2
1 Parent(s): fcd6234

Create app.py

Files changed (1)
  app.py +49 -0
app.py ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+from diffusers import DiffusionPipeline, FluxImg2ImgPipeline
+from PIL import Image
+import torch
+
+# Load the base model and apply the LoRA weights
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.float16)
+pipe = pipe.to("cuda")  # assumes a CUDA-capable GPU is available
+pipe.load_lora_weights("enhanceaiteam/Flux-Uncensored-V2")
+
+# Reuse the already-loaded components for image-to-image (needs a diffusers release with FLUX img2img support)
+img2img_pipe = FluxImg2ImgPipeline.from_pipe(pipe)
+
+# Function for text-to-image
+def text_to_image(prompt):
+    image = pipe(prompt).images[0]
+    return image
+
+# Function for image-to-image
+def image_to_image(init_image, prompt):
+    init_image = init_image.convert("RGB")  # ensure the uploaded image is RGB
+    # strength controls how far the result may drift from the uploaded image
+    image = img2img_pipe(prompt, image=init_image, strength=0.75).images[0]
+    return image
+
+# Gradio UI
+with gr.Blocks() as demo:
+    gr.Markdown("# Flux Uncensored V2 Demo")
+
+    with gr.Tab("Text-to-Image"):
+        with gr.Row():
+            text_prompt = gr.Textbox(label="Enter your prompt")
+            generated_image = gr.Image(label="Generated Image")
+        generate_button = gr.Button("Generate")
+
+        # Connect the button to the text-to-image function
+        generate_button.click(fn=text_to_image, inputs=text_prompt, outputs=generated_image)
+
+    with gr.Tab("Image-to-Image"):
+        with gr.Row():
+            init_image = gr.Image(label="Upload Initial Image", type="pil")
+            edit_prompt = gr.Textbox(label="Enter your prompt")
+            edited_image = gr.Image(label="Edited Image")
+        edit_button = gr.Button("Generate from Image")
+
+        # Connect the button to the image-to-image function
+        edit_button.click(fn=image_to_image, inputs=[init_image, edit_prompt], outputs=edited_image)
+
+demo.launch()
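
Once the app is running, the demo can also be queried from another process. The following is only an illustrative sketch, not part of this commit: it assumes the separate gradio_client package is installed and the app is serving at Gradio's default local address, and it targets the handler by fn_index because app.py does not assign api_name to its click events.

# Illustrative sketch (not part of the commit): call the running demo remotely.
# Assumes `pip install gradio_client` and the app serving at the default local URL.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")

# fn_index=0 targets the first registered event, the text-to-image click handler;
# for a gr.Image output the client returns a path to the generated image file.
result_path = client.predict("a watercolor landscape at dusk", fn_index=0)
print("Generated image saved at:", result_path)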