import gradio as gr
from autogluon.multimodal import MultiModalPredictor


def text_embedding(query: str):
    # Embed the query with a pretrained Hugging Face checkpoint through
    # AutoGluon's feature-extraction pipeline. Note that the predictor is
    # rebuilt on every call; constructing it once at module load would be
    # faster, but the behavior is the same.
    model_name = "sentence-transformers/all-MiniLM-L6-v2"
    predictor = MultiModalPredictor(
        pipeline="feature_extraction",
        hyperparameters={
            "model.hf_text.checkpoint_name": model_name
        },
    )
    # extract_embedding is called on a one-element batch; the result is
    # indexed by the auto-generated column name "0".
    query_embedding = predictor.extract_embedding([query])
    return query_embedding["0"]


def main():
    # Minimal Gradio UI: a textbox for the query, a button to trigger the
    # embedding extraction, and a dataframe to display the resulting vector.
    with gr.Blocks(title="OpenSearch Demo") as demo:
        gr.Markdown("# Text Embedding for Search Queries")
        gr.Markdown("Ask an open question!")
        with gr.Row():
            inp = gr.Textbox(show_label=False)
        with gr.Row():
            btn = gr.Button("Generate Embedding")
        with gr.Row():
            out = gr.DataFrame(label="Embedding", show_label=True)
        btn.click(fn=text_embedding, inputs=inp, outputs=out)
    demo.launch()


if __name__ == "__main__":
    main()
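
# --- Optional extension (not part of the original demo) ---
# A minimal sketch of how the embedding above could drive a semantic search
# against an OpenSearch k-NN index, since the UI is titled "OpenSearch Demo".
# Everything here is an assumption to adapt to your setup: the opensearch-py
# client is installed, a cluster is reachable at localhost:9200, and it holds
# a k-NN index named "demo-index" with a knn_vector field named "embedding".
# In a real script this block would sit above the __main__ guard.
import numpy as np
from opensearchpy import OpenSearch


def knn_search(query: str, k: int = 5):
    client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])
    # Flatten the embedding to a plain list of floats so it can be serialized
    # into the JSON query body, whatever array shape text_embedding returns.
    vector = np.asarray(text_embedding(query)).reshape(-1).tolist()
    body = {
        "size": k,
        "query": {"knn": {"embedding": {"vector": vector, "k": k}}},
    }
    return client.search(index="demo-index", body=body)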