from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = FastAPI()

# ✅ Load DeepSeek-Coder-V2-Base with trust_remote_code=True (the repo ships custom model code)
model_name = "deepseek-ai/DeepSeek-Coder-V2-Base"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)


class CodeRequest(BaseModel):
    user_story: str


@app.post("/generate-code")
def generate_code(request: CodeRequest):
    """Generates structured AI-powered code based on a user story."""
    prompt = f"Generate structured code for: {request.user_story}"
    # Send inputs to the device the model was placed on by device_map="auto"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    try:
        # max_new_tokens bounds the generated text without counting the prompt tokens
        output = model.generate(**inputs, max_new_tokens=300)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Generation failed: {exc}")
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"generated_code": generated_code}


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
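For reference, a minimal client-side sketch for calling the endpoint is shown below. It assumes the app is reachable at http://localhost:7860 (the port uvicorn binds above) and uses the requests library, which is not part of the app itself; the example user story is purely illustrative.

# Hypothetical usage sketch (separate script, not part of app.py)
import requests  # assumed to be installed separately

payload = {"user_story": "As a user, I want to reset my password via email."}
resp = requests.post("http://localhost:7860/generate-code", json=payload, timeout=300)
resp.raise_for_status()
print(resp.json()["generated_code"])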