import gradio as gr
from transformers import TextStreamer
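
# `model` and `tokenizer` are assumed to be loaded once at startup, before the
# Gradio handler below runs. This is a minimal sketch of that setup; the
# checkpoint name is a hypothetical placeholder, not the Space's actual model.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "your-org/your-sql-model"  # placeholder: substitute your fine-tuned SQL model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype="auto").to("cuda")
model.eval()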

def generate_sql(schema, query):
    # Build a chat-style prompt: the database schema goes in the system message,
    # the user's request in the user message.
    messages = [
        {
            "role": "system",
            "content": f"You are a SQL assistant. Use the following database schema to answer the user's questions:\n{schema}",
        },
        {"role": "user", "content": query},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to("cuda")

    # Stream tokens to stdout as they are generated (visible in the Space logs).
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)
    outputs = model.generate(
        input_ids=inputs,
        streamer=text_streamer,
        max_new_tokens=400,
        use_cache=True,
        do_sample=True,   # required for temperature/min_p to take effect
        temperature=0.1,  # adjust temperature as needed
        min_p=0.1,
    )

    # Decode only the newly generated tokens so the prompt is not echoed back.
    generated_text = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
    return generated_text

iface = gr.Interface(
    fn=generate_sql,
    inputs=[
        gr.Textbox(label="Database Schema", lines=5),
        gr.Textbox(label="Question"),
    ],
    outputs=gr.Textbox(label="Generated SQL"),
    title="SQL Assistant",
    description="Enter a database schema and a natural-language question to get the corresponding SQL code.",
)

iface.launch()