Update app.py
app.py CHANGED
@@ -4,28 +4,30 @@ from transformers import pipeline
 from PIL import Image
 
 # function part
-# img2text - Using
+# img2text - Using the original model
 def img2text(image):
-    # Use
-    image_to_text = pipeline("image-to-text", model="
-
+    # Use the specified model but with optimized parameters
+    image_to_text = pipeline("image-to-text", model="sooh-j/blip-image-captioning-base")
+    # Limiting the output length for speed
+    text = image_to_text(image, max_new_tokens=30)[0]["generated_text"]
     return text
 
-# text2story - Using
+# text2story - Using the original model but with optimized parameters
 def text2story(text):
-    #
-    generator = pipeline("text-generation", model="
+    # Using the specified TinyLlama model
+    generator = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
 
-    # Create a
-    prompt = f"
+    # Create a prompt for the story generation
+    prompt = f"Write a brief children's story based on this: {text}. Once upon a time, "
 
-    # Generate with
+    # Generate with more constrained parameters for speed
     story_result = generator(
         prompt,
-        max_new_tokens=
+        max_new_tokens=150,  # Use max_new_tokens instead of max_length for efficiency
         num_return_sequences=1,
         temperature=0.7,
         top_k=50,
+        top_p=0.95,
         do_sample=True
     )
 
@@ -33,17 +35,26 @@ def text2story(text):
     story_text = story_result[0]['generated_text']
     story_text = story_text.replace(prompt, "Once upon a time, ")
 
-    # Find a natural ending point (end of sentence)
-
-
-
-
-
-
-
-
-
-
+    # Find a natural ending point (end of sentence) before 100 words
+    words = story_text.split()
+    if len(words) > 100:
+        # Join the first 100 words
+        shortened_text = " ".join(words[:100])
+
+        # Find the last complete sentence
+        last_period = shortened_text.rfind('.')
+        last_question = shortened_text.rfind('?')
+        last_exclamation = shortened_text.rfind('!')
+
+        # Find the last sentence ending punctuation
+        last_end = max(last_period, last_question, last_exclamation)
+
+        if last_end > 0:
+            # Truncate at the end of the last complete sentence
+            story_text = shortened_text[:last_end + 1]
+        else:
+            # If no sentence ending found, just use the shortened text
+            story_text = shortened_text
 
     return story_text
 
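For reference, a minimal sketch of how the two updated helpers could be exercised locally. This is not part of the commit: the Space's UI wiring is not shown in this diff, so the import path and the test image file name below are assumptions for illustration only.

    from PIL import Image
    from app import img2text, text2story  # assumes app.py is importable as a module

    # Hypothetical input image; any local file works for a quick smoke test
    image = Image.open("example.jpg")

    caption = img2text(image)       # BLIP caption, capped at 30 new tokens
    story = text2story(caption)     # TinyLlama story, trimmed to roughly 100 words

    print("Caption:", caption)
    print("Story:", story)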