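"""Minimal Gradio demo: generate code from a text prompt with a Hugging Face causal LM."""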
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Model ID: gpt2 is used here as a lightweight default; swap in one of the
# DeepSeek Coder checkpoints below for stronger code generation.
# model_id = "deepseek-ai/deepseek-coder-1.3b-base"
model_id = "gpt2"
# model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    # Half precision on GPU saves memory; CPU inference needs full float32
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
# Move model to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
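model.eval()  # inference mode: disables dropout and other train-time behavior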
def generate_code(prompt):
    if not prompt.strip():
        return "⚠ Please enter a valid prompt."
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,  # sampling must be enabled for temperature to take effect
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token; reuse EOS
        )
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Strip the prompt if the model echoes it at the start of the output
    if output_text.startswith(prompt):
        output_text = output_text[len(prompt):].lstrip()
    return output_text
demo = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(lines=5, label="Enter Prompt"),
    outputs=gr.Textbox(label="Generated Output"),
    title="Code Generator using DeepSeek",
)
demo.launch()
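# Tip: demo.launch(share=True) serves a temporary public URL (handy on Colab).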