import os

# Set the Hugging Face cache locations before importing transformers so they
# take effect; /tmp is the writable directory in a default Hugging Face Space.
os.environ["TRANSFORMERS_CACHE"] = "/tmp"
os.environ["HF_HOME"] = "/tmp"
os.makedirs("/tmp", exist_ok=True)

import subprocess
import tempfile

import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Load the code-generation model. DeepSeek-Coder-V2 checkpoints ship custom
# modeling code, so trust_remote_code=True is needed with the Auto* classes.
model_name = "deepseek-ai/DeepSeek-Coder-V2-Base"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
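# The full Base checkpoint is very large; if it does not fit on the available
# hardware, the Lite variant should be a drop-in substitute (an assumption to
# verify for your Space):
#
#     model_name = "deepseek-ai/DeepSeek-Coder-V2-Lite-Base"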
class CodeRequest(BaseModel):
    user_story: str


class TestRequest(BaseModel):
    code: str
@app.post("/generate_code")  # route path assumed; no decorator in the original
def generate_code(request: CodeRequest):
    """Generates structured code from a user story with the loaded model."""
    prompt = f"Generate structured code for: {request.user_story}"
    # With device_map="auto", send inputs to wherever the model was placed
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=300)
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"generated_code": generated_code}
@app.post("/test_code")  # route path assumed; no decorator in the original
def test_code(request: TestRequest):
    """Runs pytest against the submitted code in a temporary file."""
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".py", dir="/tmp") as temp_file:
            temp_file.write(request.code.encode())
        result = subprocess.run(["pytest", temp_file.name], capture_output=True, text=True)
        os.unlink(temp_file.name)
        if result.returncode == 0:
            return {"test_status": "All tests passed!"}
        # pytest reports failures on stdout, so return both streams
        return {"test_status": "Test failed!", "details": result.stdout + result.stderr}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
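# Example payload for /test_code: the posted string is written to a temporary
# .py file and collected by pytest, so it should define its own test functions.
# A minimal client-side sketch using the `requests` library:
#
#     code = "def add(a, b):\n    return a + b\n\ndef test_add():\n    assert add(1, 2) == 3\n"
#     requests.post("http://localhost:7860/test_code", json={"code": code})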
@app.post("/execute_code")  # route path assumed; no decorator in the original
def execute_code():
    """Executes a fixed sample snippet; a placeholder for running generated code."""
    sample_code = "print('Hello from AI-generated code!')"
    try:
        result = subprocess.run(["python3", "-c", sample_code], capture_output=True, text=True)
        if result.returncode != 0:
            return {"status": "Execution failed!", "error": result.stderr}
        return {"status": "Execution successful!", "output": result.stdout}
    except Exception as e:
        return {"status": "Execution failed!", "error": str(e)}
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
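# Example client calls, a minimal sketch to run in a separate process once the
# server is up. The route paths are the assumptions introduced above:
#
#     import requests
#
#     base = "http://localhost:7860"
#     story = {"user_story": "As a user, I want a function that reverses a string."}
#     print(requests.post(f"{base}/generate_code", json=story).json())
#     print(requests.post(f"{base}/execute_code").json())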