from fastapi import FastAPI, Query
from fastapi.middleware.cors import CORSMiddleware
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = FastAPI()

# CORS settings
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins (restrict as needed)
    allow_credentials=True,
    allow_methods=["*"],  # Allow all HTTP methods
    allow_headers=["*"],  # Allow all headers
)


# Load the model and tokenizer
def load_prompter():
    prompter_model = AutoModelForCausalLM.from_pretrained("microsoft/Promptist")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "left"
    return prompter_model, tokenizer

prompter_model, prompter_tokenizer = load_prompter()

@app.get("/generate")
async def generate(text: str = Query(..., description="Input text to be processed by the model")):
    prompt = text.strip() + " Rephrase:"
    input_ids = prompter_tokenizer(prompt, return_tensors="pt").input_ids
    eos_id = prompter_tokenizer.eos_token_id
    outputs = prompter_model.generate(
        input_ids,
        do_sample=False,
        max_new_tokens=75,
        num_beams=8,
        num_return_sequences=1,
        eos_token_id=eos_id,
        pad_token_id=eos_id,
        length_penalty=-1.0,
    )
    output_texts = prompter_tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # Strip the echoed prompt so only the rephrased text remains
    res = output_texts[0].replace(prompt, "").strip()
    return {"result": res}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
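
# Example client call (a minimal sketch; assumes the server above is running
# locally on port 7860 and that the `requests` package is installed):
#
#   import requests
#   resp = requests.get(
#       "http://localhost:7860/generate",
#       params={"text": "A rabbit is wearing a space suit"},
#   )
#   print(resp.json()["result"])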