import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the text-to-SQL model and its tokenizer
tokenizer = AutoTokenizer.from_pretrained("juierror/text-to-sql-with-table-schema")
model = AutoModelForSeq2SeqLM.from_pretrained("juierror/text-to-sql-with-table-schema")

# Streamlit input widgets for the table schema and the natural-language question
t = st.text_input('enter tables')
q = st.text_input('enter question')

def prepare_input(question: str, table: str):
    # Build the prompt the model expects: "question: ... table: ..."
    table_prefix = "table:"
    question_prefix = "question:"
    inputs = f"{question_prefix} {question} {table_prefix} {table}"
    input_ids = tokenizer(inputs, max_length=700, return_tensors="pt").input_ids
    return input_ids

def inference(question: str, table: str) -> str:
    # Tokenize the prompt, run beam-search generation, and decode the SQL string
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result

# Only run the model once both inputs have been provided
if q and t:
    st.write(inference(q, t))
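
The app itself is launched with streamlit run app.py (assuming the file is saved as app.py). For context, here is a minimal sketch of how the inference helper could be exercised outside Streamlit; the schema string and question below are hypothetical examples, and the assumption (not stated in the Space) is that the "tables" field takes a comma-separated list of column names as in the model's own examples:

# Hypothetical inputs for illustration only
example_table = "id, name, age, city"
example_question = "what is the name of the oldest person?"

# Call the helper directly to print the generated SQL string
print(inference(question=example_question, table=example_table))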