import logging

import torch
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

logger = logging.getLogger(__name__)

# Create the FastAPI application instance.
app = FastAPI()

# Load the pretrained AI-text detector and its tokenizer once at startup
# (from_pretrained also puts the model in eval mode).
MODEL_NAME = "openai-community/roberta-base-openai-detector"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)


class TextRequest(BaseModel):
    """Request body for /predict: the text to classify."""

    # Raw input text to be scored by the detector.
    text: str


@app.post("/predict")
async def predict(request: TextRequest) -> dict:
    """Classify the submitted text as AI-generated or human-written.

    Args:
        request: Parsed JSON body containing the ``text`` field.

    Returns:
        ``{"result": [{"label": "AI" | "Human", "score": <float>}]}`` where
        ``score`` is the model's probability that the text is AI-generated.
    """
    # Tokenize the input, truncating to the model's 512-token context window.
    inputs = tokenizer(
        request.text, return_tensors="pt", truncation=True, max_length=512
    )

    # Run inference without building the autograd graph.
    with torch.no_grad():
        outputs = model(**inputs)
    logger.debug("Raw model output: %s", outputs)

    # Convert logits to class probabilities for the single input sequence.
    scores = torch.softmax(outputs.logits, dim=1)
    predictions = scores.tolist()[0]
    # NOTE(review): index 0 is assumed to be the "Fake"/AI class for this
    # checkpoint — confirm against the model's id2label mapping.
    fake_prob = predictions[0]

    result = [
        {
            "label": "AI" if fake_prob > 0.5 else "Human",
            "score": fake_prob,  # probability the text is AI-generated
        }
    ]
    logger.debug("Prediction: %s", result)
    return {"result": result}


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)