mayf committed on
Commit
8f922fb
·
verified ·
1 Parent(s): 4d1f328

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -17
app.py CHANGED
@@ -21,23 +21,21 @@ def load_models():
21
  captioner = pipeline(
22
  "image-to-text",
23
  model="Salesforce/blip-image-captioning-base",
24
- device=-1
25
  )
26
 
27
- # Story generation model with optimized settings
28
  storyteller = pipeline(
29
  "text-generation",
30
  model="Qwen/Qwen3-1.7B",
31
  device_map="auto",
32
  trust_remote_code=True,
33
  torch_dtype="auto",
34
- model_kwargs={
35
- "revision": "main",
36
- "temperature": 0.7,
37
- "top_p": 0.9,
38
- "repetition_penalty": 1.1,
39
- "pad_token_id": 151645
40
- }
41
  )
42
 
43
  return captioner, storyteller
@@ -82,12 +80,16 @@ uploaded_image = st.file_uploader(
82
  )
83
 
84
  if uploaded_image:
85
- # Display uploaded image
86
  image = Image.open(uploaded_image).convert("RGB")
87
- st.image(image, use_column_width=True)
88
 
89
  # Load models only when needed
90
- caption_pipe, story_pipe = load_models()
 
 
 
 
91
 
92
  # Generate image caption
93
  with st.spinner("🔍 Analyzing image..."):
@@ -96,11 +98,11 @@ if uploaded_image:
96
  image_caption = caption_result[0].get("generated_text", "").strip()
97
 
98
  if not image_caption:
99
- raise ValueError("Couldn't generate caption")
100
 
101
  st.success(f"**Image Understanding:** {image_caption}")
102
  except Exception as e:
103
- st.error("❌ Failed to analyze image. Please try another.")
104
  st.stop()
105
 
106
  # Create story prompt
@@ -123,8 +125,6 @@ if uploaded_image:
123
  try:
124
  story_result = story_pipe(
125
  story_prompt,
126
- max_new_tokens=300,
127
- do_sample=True,
128
  num_return_sequences=1
129
  )
130
  raw_story = story_result[0]['generated_text']
@@ -151,4 +151,3 @@ if uploaded_image:
151
  # Footer
152
  st.markdown("---")
153
  st.caption("Made with ♥ by The Story Wizard • [Report Issues](https://example.com)")
154
-
 
21
  captioner = pipeline(
22
  "image-to-text",
23
  model="Salesforce/blip-image-captioning-base",
24
+ device=-1 # Force CPU usage
25
  )
26
 
27
+ # Story generation model with updated parameters
28
  storyteller = pipeline(
29
  "text-generation",
30
  model="Qwen/Qwen3-1.7B",
31
  device_map="auto",
32
  trust_remote_code=True,
33
  torch_dtype="auto",
34
+ temperature=0.7,
35
+ top_p=0.9,
36
+ repetition_penalty=1.1,
37
+ pad_token_id=151645,
38
+ max_new_tokens=300
 
 
39
  )
40
 
41
  return captioner, storyteller
 
80
  )
81
 
82
  if uploaded_image:
83
+ # Display uploaded image with modern parameter
84
  image = Image.open(uploaded_image).convert("RGB")
85
+ st.image(image, use_container_width=True) # Updated parameter
86
 
87
  # Load models only when needed
88
+ try:
89
+ caption_pipe, story_pipe = load_models()
90
+ except Exception as e:
91
+ st.error(f"❌ Model loading failed: {str(e)}")
92
+ st.stop()
93
 
94
  # Generate image caption
95
  with st.spinner("🔍 Analyzing image..."):
 
98
  image_caption = caption_result[0].get("generated_text", "").strip()
99
 
100
  if not image_caption:
101
+ raise ValueError("Empty caption generated")
102
 
103
  st.success(f"**Image Understanding:** {image_caption}")
104
  except Exception as e:
105
+ st.error("❌ Image analysis failed. Please try another image.")
106
  st.stop()
107
 
108
  # Create story prompt
 
125
  try:
126
  story_result = story_pipe(
127
  story_prompt,
 
 
128
  num_return_sequences=1
129
  )
130
  raw_story = story_result[0]['generated_text']
 
151
  # Footer
152
  st.markdown("---")
153
  st.caption("Made with ♥ by The Story Wizard • [Report Issues](https://example.com)")