AAhad committed on
Commit
3f2970b
·
1 Parent(s): 0b61780

take user input audio and convert it to text

Browse files
Files changed (2) hide show
  1. app.py +71 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import soundfile as sf
3
+ import librosa
4
+ import numpy as np
5
+ import time
6
+ from transformers import pipeline
7
+ from io import BytesIO
8
+
9
+ # Define the models (You can replace these with any other top models supporting audio input)
10
+ MODELS = {
11
+ "Whisper (English)": "openai/whisper-small.en",
12
+ "Whisper (Multilingual)": "openai/whisper-small",
13
+ "Facebook Wav2Vec2": "facebook/wav2vec2-large-960h",
14
+ "Google Wav2Vec2": "google/wav2vec2-large-xlsr-53",
15
+ "Whisper (Thai)": "openai/whisper-large"
16
+ }
17
+
18
+ # App UI
19
+ st.title("Audio to Text Conversion")
20
+ st.subheader("Select language and model")
21
+
22
+ # Language selection
23
+ language = st.selectbox("Choose Language", options=["English", "Thai"])
24
+
25
+ # Model selection
26
+ model_choice = st.selectbox("Choose a Model", options=list(MODELS.keys()))
27
+
28
+ # Record audio
29
+ st.subheader("Record your audio")
30
+ audio_recorder = st.audio("")
31
+
32
+ if st.button("Start Recording"):
33
+ # Add code here to handle audio recording via mic or upload if needed
34
+ st.warning("Audio recording functionality needs to be implemented")
35
+
36
+ # Placeholder for conversion metrics
37
+ if audio_recorder:
38
+ st.write("Recording audio metrics...")
39
+
40
+ # Read audio file
41
+ audio_data, sr = librosa.load(audio_recorder, sr=None)
42
+
43
+ # Compute audio properties
44
+ audio_size = len(audio_data) * 2 # in bytes (16-bit PCM)
45
+ frame_rate = sr
46
+ duration = librosa.get_duration(y=audio_data, sr=sr)
47
+
48
+ # Display audio properties
49
+ st.write(f"Audio Size: {audio_size} bytes")
50
+ st.write(f"Frame Rate: {frame_rate} Hz")
51
+ st.write(f"Duration: {duration:.2f} seconds")
52
+
53
+ # Perform conversion using the selected model
54
+ st.subheader("Converting audio to text...")
55
+
56
+ start_time = time.time()
57
+
58
+ # Load the model from HuggingFace
59
+ model = pipeline("automatic-speech-recognition", model=MODELS[model_choice])
60
+
61
+ # Perform the conversion
62
+ audio_bytes = BytesIO(sf.write("temp.wav", audio_data, sr))
63
+ result = model(audio_bytes)
64
+
65
+ end_time = time.time()
66
+
67
+ # Display results
68
+ st.write("Transcription:", result['text'])
69
+ st.write(f"Conversion took {end_time - start_time:.2f} seconds")
70
+
71
+ # Provide placeholder for actual audio recording functionality if necessary.
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
streamlit
transformers
# torch: required backend for transformers' automatic-speech-recognition pipelines
torch
librosa
soundfile