amos1088 committed on
Commit 88d1237 · 1 Parent(s): 24a908d

test gradio

Files changed (1)
  1. app.py +20 -100
app.py CHANGED
@@ -1,4 +1,7 @@
  import gradio as gr
+ import torch
+ from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
+ import gradio as gr
  from huggingface_hub import login
  import os
  import spaces,tempfile
@@ -14,108 +17,25 @@ import openai,json
 
  token = os.getenv("HF_TOKEN")
  login(token=token)
- openai_token = os.getenv("OPENAI_TOKEN")
- openai.api_key = openai_token
- openaiclient = openai.OpenAI(api_key=openai.api_key)
-
- def ask_gpt(massage_history,model="gpt-4o-mini",return_str=True,response_format={"type": "json_object"}):
-     response = openaiclient.chat.completions.create(
-         model=model,
-         messages=massage_history,
-         response_format=response_format,
-         max_tokens=4000, )
-
-     if return_str:
-         return response.choices[0].message.content
-     else:
-         return json.loads(response.choices[0].message.content)
-
-
- image_pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16).to("cuda")
- image_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
-
-
- @spaces.GPU
- def generate_image(prompt, reference_image, controlnet_conditioning_scale):
-     style_images = [load_image(f.name) for f in reference_image]
-
-     image_pipeline.set_ip_adapter_scale(controlnet_conditioning_scale)
-
-     image = image_pipeline(
-         prompt=prompt,
-         ip_adapter_image=[style_images],
-         negative_prompt="",
-         guidance_scale=5,
-         num_inference_steps=30,
-     ).images[0]
-
-     return image
-
- model_id = "Lykon/DreamShaper"
- motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-3"
- controlnet_id = "guoyww/animatediff-sparsectrl-rgb"
- lora_adapter_id = "guoyww/animatediff-motion-lora-v1-5-3"
- vae_id = "stabilityai/sd-vae-ft-mse"
- device = "cuda"
-
- motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=torch.float16).to(device)
- controlnet = SparseControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16).to(device)
- vae = AutoencoderKL.from_pretrained(vae_id, torch_dtype=torch.float16).to(device)
- scheduler = DPMSolverMultistepScheduler.from_pretrained(
-     model_id,
-     subfolder="scheduler",
-     beta_schedule="linear",
-     algorithm_type="dpmsolver++",
-     use_karras_sigmas=True,
- )
- gif_pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
-     model_id,
-     motion_adapter=motion_adapter,
-     controlnet=controlnet,
-     vae=vae,
-     scheduler=scheduler,
-     torch_dtype=torch.float16,
- ).to(device)
- gif_pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora")
-
-
- @spaces.GPU
- def generate_gif(prompt, reference_image, controlnet_conditioning_scale,style_conditioning_scale,num_frames):
-     style_image = generate_image(prompt, reference_image, float(style_conditioning_scale))
-
-     video = gif_pipe(
-         prompt=prompt,
-         negative_prompt="low quality, worst quality",
-         num_inference_steps=25,
-         conditioning_frames=[style_image],
-         controlnet_frame_indices=[0],
-         controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-         num_frames=int(num_frames)
-     ).frames[0]
-     export_to_gif(video, "output.gif")
-
-     yield ([style_image], "output.gif")
-
- # Set up Gradio interface
- interface = gr.Interface(
-     fn=generate_gif,
-     inputs=[
-         gr.Textbox(label="Prompt"),
-         # gr.Image( type= "filepath",label="Reference Image (Style)"),
-         gr.File(type="filepath",file_count="multiple",label="Reference Image (Style)"),
-         gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
-         gr.Slider(label="Style Scale", minimum=0, maximum=1.0, step=0.1, value=0.6),
-         gr.Slider(label="Number of frames", minimum=0, maximum=100.0, step=1.0, value=10.0),
-     ],
-     outputs=["gallery","image"],
-     title="Image Generation with Stable Diffusion 3 medium and ControlNet",
-     description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet."
- )
-
- interface.launch()
+ model_id = "stabilityai/stable-diffusion-2-base"
+ scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
+
+ lora_path = "Jl-wei/ui-diffuser-v2"
+ pipe.load_lora_weights(lora_path)
+ pipe.to("cuda")
+
+ def gui_generation(text, num_imgs):
+     prompt = f"Mobile app: {text}"
+     images = pipe(prompt, num_inference_steps=30, guidance_scale=7.5, height=512, width=288, num_images_per_prompt=num_imgs).images
+     yield images
+
+ with gr.Blocks() as demo:
+     gallery = gr.Gallery(columns=[3], rows=[1], object_fit="contain", height="auto")
+     number_slider = gr.Slider(1, 30, value=2, step=1, label="Batch size")
+     prompt_box = gr.Textbox(label="Prompt", placeholder="Health monittoring report")
+     gr.Interface(gui_generation, inputs=[prompt_box, number_slider], outputs=gallery)
+
+ if __name__ == "__main__":
+     demo.launch()
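
For a quick sanity check outside the Space, the pipeline added in this commit can be exercised without the Gradio layer. The sketch below reuses the model id, scheduler, LoRA repo, prompt prefix, and generation parameters exactly as they appear in the diff; the prompt text and output filename are illustrative assumptions, not part of the commit.

# Minimal smoke test (a sketch; assumes a CUDA device and that the hub repos above are reachable)
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
pipe.load_lora_weights("Jl-wei/ui-diffuser-v2")  # LoRA repo from the diff
pipe.to("cuda")

# Same call shape as gui_generation(); "login screen" is an illustrative prompt.
image = pipe("Mobile app: login screen", num_inference_steps=30, guidance_scale=7.5,
             height=512, width=288).images[0]
image.save("smoke_test.png")  # illustrative output path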