fastapi_wyw / main.py
test2023h5's picture
Update main.py
80a62d4 verified
raw
history blame
1.01 kB
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, get_peft_config
import json
import torch
# Pick the compute device once at import time: CUDA GPU when available,
# CPU otherwise. (Not used by the visible routes; presumably consumed by
# model-loading code elsewhere — TODO confirm.)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# FastAPI application instance that the route decorators below register on.
app = FastAPI()
# Request-body schema for the POST /process endpoint.
class ProcessRequest(BaseModel):
    # The text to be transformed.
    text: str
    # Transformation selector. NOTE(review): declared as `str` here, but
    # process_text compares it against the ints 1/2/3, which can never be
    # equal to a str — confirm whether this should be `int` or the handler
    # should compare against "1"/"2"/"3".
    method: str
# GET endpoint: trivial liveness/greeting route.
@app.get("/hello")
async def say_hello():
    """Return a static greeting payload."""
    greeting = {"message": "Hello, World!"}
    return greeting
# POST endpoint: apply a simple text transformation selected by `method`.
@app.post("/process")
async def process_text(request: ProcessRequest):
    """Transform ``request.text`` according to ``request.method``.

    Methods: "1" -> uppercase, "2" -> lowercase, "3" -> reversed;
    any other value returns the text unchanged.

    Returns a dict echoing the original text, the processed text, and the
    method as received.
    """
    # BUG FIX: ProcessRequest declares `method: str`, but the original code
    # compared `request.method == 1` (an int). A str never equals an int in
    # Python, so every request silently fell through to the unchanged-text
    # branch. Normalizing to str makes "1"/"2"/"3" dispatch correctly, and
    # stays correct should the field ever be retyped to `int`.
    method = str(request.method)
    if method == "1":
        processed_text = request.text.upper()
    elif method == "2":
        processed_text = request.text.lower()
    elif method == "3":
        processed_text = request.text[::-1]  # reverse the string
    else:
        # Unknown method: echo the text back untouched (original behavior).
        processed_text = request.text
    return {"original_text": request.text, "processed_text": processed_text, "method": request.method}
print("fastapi done")