Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,3 @@
|
|
1 |
-
|
2 |
-
|
3 |
from fastapi import FastAPI, Query
|
4 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
5 |
import torch
|
@@ -16,7 +14,7 @@ def load_prompter():
|
|
16 |
|
17 |
prompter_model, prompter_tokenizer = load_prompter()
|
18 |
|
19 |
-
@app.get("/")
|
20 |
async def generate(text: str = Query(..., description="Input text to be processed by the model")):
|
21 |
input_ids = prompter_tokenizer(text.strip() + " Rephrase:", return_tensors="pt").input_ids
|
22 |
eos_id = prompter_tokenizer.eos_token_id
|
@@ -27,4 +25,4 @@ async def generate(text: str = Query(..., description="Input text to be processe
|
|
27 |
|
28 |
if __name__ == "__main__":
|
29 |
import uvicorn
|
30 |
-
uvicorn.run(app, host="0.0.0.0", port=8000)
|
|
|
|
|
|
|
1 |
from fastapi import FastAPI, Query
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
3 |
import torch
|
|
|
14 |
|
15 |
prompter_model, prompter_tokenizer = load_prompter()
|
16 |
|
17 |
+
@app.get("/")
|
18 |
async def generate(text: str = Query(..., description="Input text to be processed by the model")):
|
19 |
input_ids = prompter_tokenizer(text.strip() + " Rephrase:", return_tensors="pt").input_ids
|
20 |
eos_id = prompter_tokenizer.eos_token_id
|
|
|
25 |
|
26 |
if __name__ == "__main__":
|
27 |
import uvicorn
|
28 |
+
uvicorn.run(app, host="0.0.0.0", port=8000)
|