K00B404 committed on
Commit 4993c6e · verified · 1 Parent(s): d8bcd28

Update app.py

Files changed (1)
  1. app.py +3 -88
app.py CHANGED
@@ -11,94 +11,9 @@ import json
  # Project by Nymbo
  from diffusers import DiffusionPipeline

- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt")

- prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
- image = pipe(prompt).images[0]

- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
- API_TOKEN = os.getenv("HF_READ_TOKEN")
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
- timeout = 100

- # Function to query the API and return the generated image
- def query(prompt, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
-     if prompt == "" or prompt is None:
-         return None
-
-     key = random.randint(0, 999)
-     headers = {"Authorization": f"Bearer {API_TOKEN}"}
-     # Translate the prompt from Russian to English if necessary
-     prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
-     print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
-     # Add some extra flair to the prompt
-     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
-     print(f'\033[1mGeneration {key}:\033[0m {prompt}')
-     # Prepare the payload for the API call, including width and height
-     payload = {
-         "inputs": prompt,
-         "is_negative": is_negative,
-         "steps": steps,
-         "cfg_scale": cfg_scale,
-         "seed": seed if seed != -1 else random.randint(1, 1000000000),
-         "strength": strength,
-         "parameters": {
-             "width": width,   # Pass the width to the API
-             "height": height  # Pass the height to the API
-         }
-     }
-
-     # Send the request to the API and handle the response
-     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
-     if response.status_code != 200:
-         print(f"Error: Failed to get image. Response status: {response.status_code}")
-         print(f"Response content: {response.text}")
-         if response.status_code == 503:
-             raise gr.Error(f"{response.status_code} : The model is being loaded")
-         raise gr.Error(f"{response.status_code}")
-     try:
-         # Convert the response content into an image
-         image_bytes = response.content
-         image = Image.open(io.BytesIO(image_bytes))
-         print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
-         return image
-     except Exception as e:
-         print(f"Error when trying to open the image: {e}")
-         return None
-
- # ... (CSS and other code remains the same)
- title = "<title>FluxiFloXStrot</title>"
- # Build the Gradio UI with Blocks
- with gr.Blocks() as app:
-     gr.HTML(title)
-
-     with gr.Row():
-         gr.HTML('<div id="neon-cursor" class="neon-cursor"></div>')
-
-     with gr.Column(elem_id="app-container"):
-         with gr.Row():
-             with gr.Column(elem_id="prompt-container"):
-                 with gr.Row():
-                     text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
-
-                 with gr.Row():
-                     with gr.Accordion("Advanced Settings", open=False):
-                         negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
-                         with gr.Row():
-                             width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
-                             height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
-                             steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
-                             cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
-                             strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
-                             seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
-                             method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
-
-         with gr.Row():
-             text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
-
-         with gr.Row():
-             image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
-
-     text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
-
- app.launch(show_api=True, share=False)
+ pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid")
+ image = Image.open("19616f3c-1419-43a1-baa7-c3c6ee8b0980.png") #
+ image = pipe(image=image).images[0]
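
For context, here is a minimal sketch of how an image-to-video run with this checkpoint is usually wired up in diffusers. It is not part of the commit: it assumes the StableVideoDiffusionPipeline class, a CUDA device, and a placeholder input file name, and it reads the output from .frames, since the Stable Video Diffusion pipelines return frame lists rather than .images (the committed pipe(image=image).images[0] line would likely need the same adjustment).

# Hedged sketch, not part of the commit: typical diffusers image-to-video flow
# for stabilityai/stable-video-diffusion-img2vid.
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

# Load the checkpoint in half precision and move it to the GPU.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")

# "input.png" is a placeholder for the conditioning image used in the commit;
# SVD was trained on 1024x576 frames, so the input is resized to match.
image = load_image("input.png").resize((1024, 576))

# The pipeline returns a batch of frame lists under .frames (not .images);
# frames[0] is the list of PIL frames for the single generated clip.
frames = pipe(image, decode_chunk_size=8).frames[0]

# Write the frames out as a short video file.
export_to_video(frames, "generated.mp4", fps=7)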