GamerC0der committed on
Commit ebb7007 · verified · 1 Parent(s): 3de574b

Update app.py

Files changed (1)
  1. app.py +31 -48
app.py CHANGED
@@ -1,51 +1,34 @@
  import gradio as gr
- import requests
+ from transformers import StableDiffusionPipeline
+ import torch
  from PIL import Image
- import base64
- from io import BytesIO
+ import requests
 
- def query_hf_image_generation(api_key, prompt):
-     API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
-     headers = {
-         "Authorization": f"Bearer {api_key}",
-         "Content-Type": "application/json"
-     }
-     data = {"inputs": prompt}
-
-     response = requests.post(API_URL, headers=headers, json=data)
-
-     if response.status_code != 200:
-         return f"Error: Received HTTP {response.status_code} - {response.text}"
-
-     try:
-         result = response.json()
-     except ValueError:
-         return f"Error decoding JSON: Unexpected response format {response.text}"
-
-     if 'error' in result:
-         return f"Error: {result['error']}"
-
-     if 'data' in result:
-         try:
-             base64_string = result['data'][0]
-             base64_data = base64_string.split(",")[1] if "," in base64_string else base64_string
-             image_data = base64.b64decode(base64_data)
-             image = Image.open(BytesIO(image_data))
-             return image
-         except Exception as e:
-             return f"Error processing image data: {e}"
-     else:
-         return "Error: Missing 'data' in the response."
-
- iface = gr.Interface(
-     fn=query_hf_image_generation,
-     inputs=[
-         gr.Textbox(label="Hugging Face API Key", placeholder="Enter your Hugging Face API Key here..."),
-         gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
-     ],
-     outputs=gr.Image(label="Generated Image"),
-     title="Stable Diffusion XL Image Generator",
-     description="Enter your API Key and a prompt to generate an image using the Stable Diffusion XL model from Hugging Face."
- )
-
- iface.launch(share=True)
+ def generate_image(prompt):
+     # Load the preprocessing and model pipeline
+     # Here, we assume the Kvikontent/midjourney-v6 model has text-to-image capabilities in a manner similar to stable diffusion.
+     # This part needs verification and adjustment according to actual model documentation and availability.
+     model_id = "Kvikontent/midjourney-v6"
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+     # Setup the model pipeline (this can be adjusted if the model's actual interface differs)
+     # This example uses the typical usage pattern for generative models, but you should adjust according to the actual model's specs.
+     pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True)  # Replace with actual method to load Kvikontent/midjourney-v6 if different
+     pipe = pipe.to(device)
+
+     # Generating the image
+     image = pipe(prompt).images[0]  # This line assumes the return type is accessible like this, adjust this according to actual usage.
+
+     # Convert tensor to PIL Image (adjust if the output format differs)
+     image = Image.fromarray(image.numpy(), 'RGB')
+     return image
+
+ # Create a Gradio interface
+ iface = gr.Interface(fn=generate_image,
+                      inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your prompt here..."),
+                      outputs="image",
+                      title="Text to Image Generator",
+                      description="Type some text and generate an image using the Kvikontent/midjourney-v6 model.")
+
+ # Running the application
+ iface.launch()
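
Note on the committed code: StableDiffusionPipeline is exported by the diffusers library, not transformers, so the new import would fail as written; pipe(prompt).images[0] is already a PIL image, so the Image.fromarray(image.numpy(), 'RGB') conversion would raise; the requests import is unused; and gr.inputs.Textbox has been deprecated and removed in current Gradio releases in favor of gr.Textbox. Below is a minimal sketch, not the committed code, of an equivalent app with those points addressed, assuming Kvikontent/midjourney-v6 is a diffusers-compatible text-to-image checkpoint (verify against the model card).

# Minimal sketch only; assumes Kvikontent/midjourney-v6 loads as a diffusers pipeline.
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

MODEL_ID = "Kvikontent/midjourney-v6"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline once at startup; half precision on GPU keeps memory usage down.
pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
).to(DEVICE)

def generate_image(prompt):
    # .images is a list of PIL.Image.Image objects, so no manual tensor-to-PIL conversion is needed.
    return pipe(prompt).images[0]

iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
    outputs=gr.Image(label="Generated Image"),
    title="Text to Image Generator",
    description="Type some text and generate an image using the Kvikontent/midjourney-v6 model.",
)

iface.launch()

Loading the pipeline once at module level avoids re-downloading and re-initializing the weights on every Gradio request, which the per-call loading inside generate_image in the committed version would otherwise incur.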