|
import torch |
|
from transformers import pipeline |
|
import gradio as gr |
|
|
|
|
|
# Hugging Face model that rewrites a user query into a reasoning prompt.
model_id = "umarigan/deepseek-r1-reasoning-prompt-generator"

# Chat-style text-generation pipeline. bfloat16 halves weight memory vs
# float32; device_map="auto" lets accelerate place layers on the available
# device(s) (GPU if present, else CPU). NOTE(review): loading happens at
# import time and downloads the model on first run.
pipe = pipeline(
    "text-generation",
    model=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
|
|
|
def generate_prompt(query: str, max_new_tokens: int = 4096) -> str:
    """Generate a reasoning prompt for the given user query.

    Args:
        query: The user's input text, sent as a single user chat message.
        max_new_tokens: Cap on the number of generated tokens. Defaults to
            4096, matching the original hard-coded limit.

    Returns:
        The text content of the model's reply.
    """
    messages = [
        {"role": "user", "content": query},
    ]
    outputs = pipe(
        messages,
        max_new_tokens=max_new_tokens,
    )
    # Chat pipelines return the whole conversation under "generated_text";
    # the last message is the assistant's reply — extract its text.
    return outputs[0]["generated_text"][-1]["content"]
|
|
|
|
|
# Minimal Gradio UI: one textbox in (the query), one textbox out (the
# generated reasoning prompt).
iface = gr.Interface(
    fn=generate_prompt,
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs=gr.Textbox(lines=10, placeholder="Generated reasoning prompt will appear here..."),
    title="Reasoning Prompt Generator",
    description="Enter a query to generate a reasoning prompt.",
)

# Start the local web server (blocks the main thread until interrupted).
iface.launch()