Leo Liu committed on
Commit 38459f0 · verified · 1 Parent(s): 2d5d1a8

Update app.py

Files changed (1)
  1. app.py +49 -57
app.py CHANGED
@@ -1,58 +1,50 @@
- # import part
- from transformers import pipeline
  import streamlit as st
-
- # function part
- # img2text
- def img2text(url):
-     image_to_text_model = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
-     text = image_to_text_model(url)[0]["generated_text"]
-     return text
-
- # text2story
- def text2story(text):
-     story_text = "" # to be completed
-     return story_text
-
- # text2audio
- def text2audio(story_text):
-     audio_data = "" # to be completed
-     return audio_data
-
- # main part
- st.set_page_config(page_title="Your Image to Audio Story",
-                    page_icon="🦜")
- st.header("Turn Your Image to Audio Story")
-
- # Upload image here
- uploaded_file = st.file_uploader("Select an Image...")
-
- if uploaded_file is not None:
-     print(uploaded_file)
-     bytes_data = uploaded_file.getvalue()
-     with open(uploaded_file.name, "wb") as file:
-         file.write(bytes_data)
-     st.image(uploaded_file, caption="Uploaded Image",
-              use_column_width=True)
-
-     #Stage 1: Image to Text
-     st.text('Processing img2text...')
-     scenario = img2text(uploaded_file.name)
-     st.write(scenario)
-
-     #Stage 2: Text to Story
-     st.text('Generating a story...')
-     #story = text2story(scenario)
-     #st.write(story)
-
-     #Stage 3: Story to Audio data
-     #st.text('Generating audio data...')
-     #audio_data =text2audio(story)
-
-     # Play button
-     if st.button("Play Audio"):
-         #st.audio(audio_data['audio'],
-         #         format="audio/wav",
-         #         start_time=0,
-         #         sample_rate = audio_data['sampling_rate'])
-         st.audio("kids_playing_audio.wav")
 
 
 
  import streamlit as st
+ from transformers import pipeline
+ from PIL import Image
+ import soundfile as sf
+ import io
+
+ # 1. Load the pipelines
+ #    - image → text: nlpconnect/vit-gpt2-image-captioning
+ #    - text → speech: facebook/mms-tts or another TTS model
+ img_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
+ text_to_speech = pipeline("text-to-speech", model="facebook/mms-tts")
+
+ st.title("Image-to-Text and Text-to-Speech App (WAV output)")
+
+ # 2. Upload an image
+ uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
+ if uploaded_image:
+     # Display the uploaded image
+     img = Image.open(uploaded_image)
+     st.image(img, caption="Uploaded Image", use_container_width=True)
+
+     # 3. Image → text
+     text_output = img_to_text(img)[0]["generated_text"]
+     st.write("### Extracted Text")
+     st.write(text_output)
+
+     # 4. Text → speech (TTS)
+     # text_to_speech(...) returns a dict containing "audio" (a numpy array) and "sampling_rate"
+     st.write("### Listen to Speech Output")
+     speech_output = text_to_speech(text_output)
+
+     # 5. Write the returned audio array to an in-memory WAV file
+     audio_array = speech_output["audio"]          # numpy array
+     sample_rate = speech_output["sampling_rate"]  # sampling rate
+
+     wav_io = io.BytesIO()
+     # Use soundfile to write the audio array into memory, specifying WAV format
+     sf.write(wav_io, audio_array, sample_rate, format="WAV")
+     wav_io.seek(0)  # reset the pointer to the start so the buffer can be read back
+
+     # 6. Play the in-memory WAV with st.audio
+     st.audio(wav_io, format="audio/wav")
+
+     # 7. (Optional) Provide a download button for the WAV file
+     st.download_button(
+         label="Download WAV",
+         data=wav_io,
+         file_name="speech.wav",
+         mime="audio/wav"
+     )
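
Note on the TTS step in the new version: the committed code assumes text_to_speech(...) returns a dict with an "audio" numpy array and a "sampling_rate". Depending on the checkpoint, that array may carry a leading batch dimension (shape (1, n_samples)), which soundfile would interpret as n_samples channels of one frame each; squeezing it first avoids that. Below is a minimal standalone sketch of the text-to-speech → WAV step under those assumptions, using the per-language checkpoint facebook/mms-tts-eng (an assumption for illustration, not part of this commit; the base facebook/mms-tts id used above may not load as a single model).

import io

import numpy as np
import soundfile as sf
from transformers import pipeline

# Assumption: a per-language MMS checkpoint; the commit itself passes "facebook/mms-tts".
tts = pipeline("text-to-speech", model="facebook/mms-tts-eng")

speech = tts("A dog is playing with a ball in the park.")
audio = np.asarray(speech["audio"]).squeeze()   # drop a (1, n_samples) batch axis if present
rate = speech["sampling_rate"]

wav_io = io.BytesIO()
sf.write(wav_io, audio, rate, format="WAV")     # a 1-D array is written as mono frames
wav_io.seek(0)                                  # rewind before handing the buffer to a reader

with open("speech.wav", "wb") as f:             # or pass wav_io directly to st.audio / st.download_button
    f.write(wav_io.read())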