geethareddy committed on
Commit
8b81348
·
verified ·
1 Parent(s): 87a3669

Update app.py

Files changed (1)
  1. app.py +55 -84
app.py CHANGED
@@ -1,97 +1,68 @@
- import os
  import gradio as gr
  import requests
- from huggingface_hub import InferenceApi
- import nltk
- from nltk.tokenize import sent_tokenize
- from dotenv import load_dotenv
-
- # Load environment variables from .env file if present
- load_dotenv()

- # Ensure NLTK sentence tokenizer data is downloaded
- nltk.download('punkt')

- # Retrieve Hugging Face API key from environment variable
- HF_API_KEY = os.getenv('onteddu')

- if not HF_API_KEY:
-     raise ValueError("Please set the 'HUGGINGFACE_API_KEY' environment variable.")

- # Initialize Hugging Face Inference API with a suitable image generation model
- # You can choose a different model if preferred
- MODEL_NAME = "black-forest-labs/FLUX.1-schnell"
- inference = InferenceApi(repo_id=MODEL_NAME, token=HF_API_KEY)

- def generate_comic_images(story):
-     """
-     Takes a short story as input, splits it into sentences, generates an image for each sentence,
-     and returns the list of image URLs.
-     """
-     # Split the story into sentences
-     sentences = sent_tokenize(story)
-
-     if len(sentences) == 0:
-         return ["No sentences found in the input story."]
-
-     images = []
-
-     for idx, sentence in enumerate(sentences):
-         # Optionally, you can modify the prompt to better suit comic-style images
-         prompt = f"Comic book style illustration of: {sentence}"
-
-         try:
-             # Generate image using Hugging Face Inference API
-             output = inference(prompt)
-
-             # The output can be a URL or binary data depending on the model
-             # Here we assume it's a URL to the generated image
-             if isinstance(output, str):
-                 image_url = output
-             elif isinstance(output, dict) and 'image' in output:
-                 image_url = output['image']
-             else:
-                 image_url = None
-
-             if image_url:
-                 images.append(image_url)
-             else:
-                 images.append("Image generation failed for this sentence.")
-         except Exception as e:
-             images.append(f"Error: {str(e)}")
-
-     return images

- def create_comic_panel(images):
-     """
-     Arranges images in a comic panel layout.
-     """
-     # Create a list of Gradio Image components with captions
-     image_components = []
-     for idx, img in enumerate(images):
-         image_components.append(gr.Image(value=img, label=f"Sentence {idx+1}"))
-
-     return image_components

- def process_story(story):
-     """
-     Main processing function to generate images and arrange them in a comic panel.
-     """
-     images = generate_comic_images(story)
-     image_components = create_comic_panel(images)
-     return image_components

- # Define Gradio interface
  iface = gr.Interface(
-     fn=process_story,
-     inputs=gr.Textbox(lines=10, placeholder="Enter your short story here...", label="Short Story"),
-     outputs=gr.Column(),
-     title="Comic Generator",
-     description="Enter a short story, and generate a comic-style image for each sentence using Hugging Face's API.",
-     examples=[
-         ["Once upon a time, there was a brave knight. He fought dragons and rescued princesses."]
-     ]
  )

- if __name__ == "__main__":
-     iface.launch()
 
  import gradio as gr
  import requests
+ import base64
+ import os
+ import time

+ # Get your Hugging Face access token from the secrets
+ HUGGING_FACE_API_KEY = os.getenv("Bharat2")  # Ensure your access token is properly set

+ def generate_images_from_text(text):
+     # Generate an image using Hugging Face's API
+     headers = {
+         "Authorization": f"Bearer {HUGGING_FACE_API_KEY}"
+     }
+
+     response = requests.post(
+         "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell",  # Correct model name
+         headers=headers,
+         json={"inputs": text}
+     )
+
+     if response.status_code == 200:
+         # The API response contains the image as raw binary data
+         image_data = response.content
+
+         # Convert the image to Base64 for embedding in HTML
+         base64_image = base64.b64encode(image_data).decode('utf-8')
+         return f"data:image/png;base64,{base64_image}"
+     else:
+         print(f"Error generating image: {response.text}")
+         return None

+ def generate_comic(short_story):
+     sentences = short_story.split('. ')
+     images = []
+
+     for sentence in sentences:
+         if sentence.strip():
+             try:
+                 # Generate one image for each sentence
+                 image_data = generate_images_from_text(sentence)
+                 if image_data:
+                     images.append(image_data)
+
+                 # Sleep for a short duration to avoid hitting rate limits
+                 time.sleep(2)  # Adjust this time if needed
+
+             except Exception as e:
+                 print(f"Error generating image: {e}")
+
+     # Create comic HTML with reduced image sizes and panel layout
+     comic_html = '<div style="display: flex; flex-wrap: wrap;">'
+     for img in images:
+         comic_html += f'<div style="flex: 1; margin: 5px;"><img src="{img}" style="max-width: 300px; max-height: 300px;" /></div>'
+     comic_html += '</div>'
+
+     return comic_html

+ # Gradio interface
  iface = gr.Interface(
+     fn=generate_comic,
+     inputs=gr.Textbox(label="Your Short Story", lines=10, placeholder="Enter your short story here..."),
+     outputs=gr.HTML(label="Generated Comic"),
+     title="Narrative to Comic Generator",
+     description="Enter a short story, and the app will generate a comic-style storybook with images and dialogues."
  )

+ iface.launch()
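
Note: the updated app drops the InferenceApi client and calls the serverless Inference API directly with requests, inlining each returned PNG as a base64 data URI for the gr.HTML output. One gap in the new code is that any non-200 response (for example a 503 while FLUX.1-schnell is still loading, or a 429 from rate limiting) only prints the error and returns None. Below is a minimal sketch of the same call with a simple retry loop; the token name HF_TOKEN, the retry count, and the backoff delay are illustrative assumptions, not part of this commit.

import base64
import os
import time

import requests

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
HF_TOKEN = os.getenv("HF_TOKEN")  # illustrative secret name; the commit reads a different secret

def text_to_data_uri(prompt, max_retries=3):
    """Request one image for `prompt`; return a base64 data URI, or None if all attempts fail."""
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    for _ in range(max_retries):
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=120)
        if response.status_code == 200:
            # The response body is the raw image bytes; embed them as a data URI
            return "data:image/png;base64," + base64.b64encode(response.content).decode("utf-8")
        if response.status_code in (429, 503):
            # Rate limited or model still loading: back off and try again
            time.sleep(10)
            continue
        print(f"Error generating image: {response.status_code} {response.text}")
        return None
    return None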