from fastapi import FastAPI
from pydantic import BaseModel

# Model/adapter loading dependencies (imported here but not yet used by the endpoints below)
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, get_peft_config
import json
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

app = FastAPI()

# Data model for the body of the POST request
class ProcessRequest(BaseModel):
    text: str
    method: int

# GET endpoint
@app.get("/hello")
async def say_hello():
    return {"message": "Hello, World!"}

# POST endpoint
@app.post("/process")
async def process_text(request: ProcessRequest):
    if request.method == 1:
        processed_text = request.text.upper()   # 1: uppercase
    elif request.method == 2:
        processed_text = request.text.lower()   # 2: lowercase
    elif request.method == 3:
        processed_text = request.text[::-1]     # 3: reverse the string
    else:
        processed_text = request.text           # anything else: return unchanged

    return {"original_text": request.text, "processed_text": processed_text, "method": request.method}

print("fastapi done")