import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the model and tokenizer
model_name = "defog/sqlcoder-7b-2"  # a newer model for better text-to-SQL performance
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")  # half precision to reduce memory usage

def generate_sql(user_question, create_table_statements):
    # Build the prompt
    prompt = f"Generate a SQL query to answer this question: `{user_question}`\nDDL statements:\n{create_table_statements}\nThe following SQL query best answers the question `{user_question}`:"

    # Tokenize the input and move it to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate; max_new_tokens bounds only the generated tokens
    # (max_length would also count the prompt and could truncate the query)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=150)

    # Decode only the newly generated tokens, not the echoed prompt
    generated_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    sql_query = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    return sql_query.strip()

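# Example call for a quick local check (illustrative only: the question and
# table schema below are made up and not part of the original app):
#
#   example_ddl = "CREATE TABLE orders (id INT, customer_id INT, order_date DATE, total DECIMAL(10, 2));"
#   print(generate_sql("What was the total value of all orders placed in 2023?", example_ddl))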
# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## SQL Query Generator")
    user_question = gr.Textbox(label="User Question", placeholder="Enter your question...")
    create_table_statements = gr.Textbox(label="DDL Statements", placeholder="Enter the CREATE TABLE (DDL) statements for your schema...")
    sql_output = gr.Textbox(label="Generated SQL Query", interactive=False)
    
    submit_btn = gr.Button("Generate SQL")
    submit_btn.click(generate_sql, inputs=[user_question, create_table_statements], outputs=sql_output)

# Launch the Gradio app (only when run as a script, to avoid launching twice)
if __name__ == "__main__":
    demo.launch()
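# Once the app is running, it can also be called programmatically with gradio_client.
# Sketch only: it assumes the default local URL/port and that Gradio exposes the click
# handler under its function name ("/generate_sql"); check the app's "Use via API" page
# for the actual endpoint name.
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("How many orders were placed last month?",
#                        "CREATE TABLE orders (id INT, order_date DATE);",
#                        api_name="/generate_sql"))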