#!/usr/bin/env python
# -*- coding: utf-8 -*-

from tempfile import NamedTemporaryFile
from typing import Any

import streamlit as st

from conette import CoNeTTEModel, conette
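
# Load and cache the CoNeTTE model; st.cache_resource ensures the checkpoint
# is instantiated only once across Streamlit reruns.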
@st.cache_resource
def load_conette(*args, **kwargs) -> CoNeTTEModel:
    return conette(*args, **kwargs)


def format_cand(cand: str) -> str:
    return f"{cand[0].title()}{cand[1:]}."


def main() -> None:
    st.header("Describe audio content with CoNeTTE")
    model = load_conette(model_kwds=dict(device="cpu"))

    st.warning("Recommended audio: from **1 to 30 s** long, sampled at **32 kHz**.")
    audios = st.file_uploader(
        "Upload audio files here:",
        type=["wav", "flac", "mp3", "ogg", "avi"],
        accept_multiple_files=True,
    )

    with st.expander("Model hyperparameters"):
        task = st.selectbox("Task embedding input", model.tasks, 0)
        allow_rep_mode = st.selectbox(
            "Allow repetition of words", ["stopwords", "all", "none"], 0
        )
        beam_size: int = st.select_slider(  # type: ignore
            "Beam size",
            list(range(1, 21)),
            model.config.beam_size,
        )
        min_pred_size: int = st.select_slider(  # type: ignore
            "Minimum number of words",
            list(range(1, 31)),
            model.config.min_pred_size,
        )
        max_pred_size: int = st.select_slider(  # type: ignore
            "Maximum number of words",
            list(range(1, 31)),
            model.config.max_pred_size,
        )
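
    # The UI exposes "allow repetition" while the model API expects the inverse
    # "forbid repetition" mode, so map one onto the other here.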
    if allow_rep_mode == "all":
        forbid_rep_mode = "none"
    elif allow_rep_mode == "none":
        forbid_rep_mode = "all"
    elif allow_rep_mode == "stopwords":
        forbid_rep_mode = "content_words"
    else:
        ALLOW_REP_MODES = ("all", "none", "stopwords")
        raise ValueError(
            f"Unknown option {allow_rep_mode=}. (expected one of {ALLOW_REP_MODES})"
        )
    del allow_rep_mode
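
    # Generation hyperparameters forwarded to every model call; they also key
    # the per-file caption cache below.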
    kwargs: dict[str, Any] = dict(
        task=task,
        beam_size=beam_size,
        min_pred_size=min_pred_size,
        max_pred_size=max_pred_size,
        forbid_rep_mode=forbid_rep_mode,
    )
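
    # Write each new upload to a temporary file and reuse captions already
    # stored in the session state for the same file and hyperparameters.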
    if audios is not None and len(audios) > 0:
        audio_to_predict = []
        cands = [""] * len(audios)
        tmp_files = []
        tmp_fpaths = []
        audio_fnames = []

        for i, audio in enumerate(audios):
            audio_fname = audio.name
            audio_fnames.append(audio_fname)

            cand_key = f"{audio_fname}-{kwargs}"
            if cand_key in st.session_state:
                cand = st.session_state[cand_key]
                cands[i] = cand
            else:
                tmp_file = NamedTemporaryFile()
                tmp_file.write(audio.getvalue())
                # Flush so the model can read the complete file from its path.
                tmp_file.flush()
                tmp_files.append(tmp_file)
                audio_to_predict.append((i, cand_key, tmp_file))
                tmp_fpath = tmp_file.name
                tmp_fpaths.append(tmp_fpath)
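
        # Caption all uncached files in a single batched forward pass.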
        if len(tmp_fpaths) > 0:
            outputs = model(
                tmp_fpaths,
                **kwargs,
            )
            for i, (j, cand_key, tmp_file) in enumerate(audio_to_predict):
                cand = outputs["cands"][i]
                cands[j] = cand
                st.session_state[cand_key] = cand
                tmp_file.close()

        for audio_fname, cand in zip(audio_fnames, cands):
            st.success(f"**Output for {audio_fname}:**\n- {format_cand(cand)}")


if __name__ == "__main__":
    main()