|
from fastapi import FastAPI |
|
from pydantic import BaseModel |
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
import torch |
|
|
|
|
|
# FastAPI application instance; the /predict route is registered on it below.
app = FastAPI()




# Load the GPT-2 output detector (a RoBERTa-base sequence classifier) once at
# import time, so every request reuses the same tokenizer and weights.
# NOTE(review): from_pretrained downloads from the Hugging Face Hub on first
# run — requires network access or a pre-populated local cache.
tokenizer = AutoTokenizer.from_pretrained("openai-community/roberta-base-openai-detector")

model = AutoModelForSequenceClassification.from_pretrained("openai-community/roberta-base-openai-detector")
|
|
|
|
|
class TextRequest(BaseModel):
    """Request body schema for ``POST /predict``."""

    # The raw text to be classified as AI-generated or human-written.
    text: str
|
|
|
|
|
@app.post("/predict")
async def predict(request: TextRequest):
    """Classify the submitted text as AI-generated or human-written.

    Args:
        request: Request body carrying the ``text`` field to classify.

    Returns:
        ``{"result": [{"label": "AI" | "Human", "score": float}]}`` where
        ``score`` is the softmax probability assigned to the AI ("fake")
        class, and the label is "AI" when that probability exceeds 0.5.
    """
    # RoBERTa's positional embeddings cap the input at 512 tokens, so longer
    # texts are truncated rather than rejected.
    inputs = tokenizer(request.text, return_tensors="pt", truncation=True, max_length=512)

    # Inference only — disable autograd so no computation graph is built.
    with torch.no_grad():
        outputs = model(**inputs)
        scores = torch.softmax(outputs.logits, dim=1)

    # Single-item batch: take the first (only) row of class probabilities.
    probabilities = scores[0].tolist()

    # NOTE(review): index 0 is assumed to be the "fake"/AI class for
    # roberta-base-openai-detector — confirm against the model's id2label
    # config before relying on this mapping.
    fake_prob = probabilities[0]

    # Debug print statements removed: they wrote raw model output and the
    # response payload to stdout on every request.
    result = [{
        "label": "AI" if fake_prob > 0.5 else "Human",
        "score": fake_prob,
    }]

    return {"result": result}
|
|
|
if __name__ == "__main__":
    # Run a local development server when executed directly (binds all
    # interfaces). Port 7860 presumably targets Hugging Face Spaces, whose
    # convention is that port — TODO confirm the deployment target.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)