import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Set up device (GPU if available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the fine-tuned model and tokenizer
model_name = "aarohanverma/text2sql_flant5base_finetuned"  # Replace with your model repository name
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, torch_dtype=torch.bfloat16).to(device)
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
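# Note (assumption, not from the original Space): bfloat16 weights load fine on GPU;
# on CPU they are supported by recent PyTorch but may run slowly, so float32 can be
# a safer choice when no GPU is available.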
def generate_sql(context: str, query: str) -> str:
    """
    Generates a SQL query given the provided context and natural language query.
    Constructs a prompt from the inputs, then performs deterministic generation
    with beam search.
    """
    prompt = f"""Context:
{context}
Query:
{query}
Response:
"""

    # Tokenize the prompt and move to device
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Ensure decoder_start_token_id is set for encoder-decoder generation
    if model.config.decoder_start_token_id is None:
        model.config.decoder_start_token_id = tokenizer.pad_token_id

    # Generate the SQL output
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        decoder_start_token_id=model.config.decoder_start_token_id,
        max_new_tokens=250,
        temperature=0.0,      # Deterministic output
        num_beams=3,          # Beam search for improved quality
        early_stopping=True,  # Stop when output is complete
    )

    # Decode and return the generated SQL statement
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
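
# Illustrative only: hypothetical sample inputs (the schema and question below are
# made up for demonstration, not taken from the fine-tuning data). Handy for a quick
# smoke test of generate_sql from a Python shell, e.g.:
#   >>> generate_sql(EXAMPLE_CONTEXT, EXAMPLE_QUERY)
EXAMPLE_CONTEXT = "CREATE TABLE employees (id INTEGER, name TEXT, department TEXT, salary INTEGER);"
EXAMPLE_QUERY = "List the names of all employees in the Sales department."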
# Create Gradio interface with two input boxes: one for context and one for query
iface = gr.Interface(
    fn=generate_sql,
    inputs=[
        gr.Textbox(lines=8, label="Context", placeholder="Enter table schema, sample data, etc."),
        gr.Textbox(lines=2, label="Query", placeholder="Enter your natural language query here..."),
    ],
    outputs="text",
    title="Text-to-SQL Generator",
    description="Enter your own context (e.g., database schema and sample data) and a natural language query. The model will generate the corresponding SQL statement.",
)

iface.launch()
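# Usage note (assumption about the deployment, not part of the original file): when run
# locally with `python app.py`, Gradio serves the interface at http://127.0.0.1:7860 by
# default; on Hugging Face Spaces the same launch() call is used to start the app.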