Update app.py
app.py CHANGED
@@ -22,10 +22,7 @@ app = FastAPI()
 async def read_root():
     return {"Hello": "World!"}
 
-
-@app.post("/model")
-async def model(data: dict):
-    prompt = data.get("prompt")
+def modelResp(promt):
     messages = [
         {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will response in polity and brief."},
         {"role": "user", "content": "Who are you?"},
@@ -50,9 +47,7 @@ async def model(data: dict):
 
     return response
 
-
-async def model1(data: dict):
-    prompt = data.get("prompt")
+def modelResp1(promt):
     messages = [
         {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will response in polity and brief."},
         {"role": "user", "content": "Who are you?"},
@@ -65,7 +60,7 @@ async def model1(data: dict):
         add_generation_prompt=True
     )
     model_inputs = tokenizer([text], return_tensors="pt").to(device)
-    generated_ids =
+    generated_ids = model1.generate(
         model_inputs.input_ids,
         max_new_tokens=64,
         do_sample=True
@@ -75,4 +70,16 @@ async def model1(data: dict):
     ]
     response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
 
-    return response
+    return response
+
+@app.post("/modelapi")
+async def modelApi(data: dict):
+    prompt = data.get("prompt")
+    response = modelResp(prompt)
+    return response
+
+@app.post("/modelapi1")
+async def modelApi1(data: dict):
+    prompt = data.get("prompt")
+    response = modelResp1(prompt)
+    return response
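Pieced together, the refactor replaces the old POST handler bodies with plain helper functions (modelResp, modelResp1) plus thin endpoint wrappers, and fixes the dangling `generated_ids =` assignment by completing the model1.generate(...) call. The commit only shows fragments of app.py, so the sketch below fills the gaps with the standard transformers chat-generation pattern the hunks match; the model/tokenizer loading, the MODEL_ID checkpoint name, the device selection, and the prompt-stripping list comprehension are assumptions, not part of the commit.

import torch
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Assumption: the Space's actual checkpoint is not shown in the commit;
# MODEL_ID here is a hypothetical stand-in.
MODEL_ID = "Qwen/Qwen2-0.5B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model1 = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)

def modelResp1(promt):
    # The visible hunks hardcode the user turn to "Who are you?", so the
    # promt argument (note the typo, kept from the commit) goes unused here.
    messages = [
        {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will response in polity and brief."},
        {"role": "user", "content": "Who are you?"},
    ]
    # Render the chat messages into a single prompt string.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    generated_ids = model1.generate(
        model_inputs.input_ids,
        max_new_tokens=64,
        do_sample=True,
    )
    # Assumed step (consistent with the lone "]" context line in the diff):
    # strip the prompt tokens so only newly generated tokens are decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

@app.post("/modelapi1")
async def modelApi1(data: dict):
    prompt = data.get("prompt")
    response = modelResp1(prompt)
    return response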
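A quick way to exercise the new route, assuming the app is served locally with `uvicorn app:app --port 8000` (the host and port are hypothetical; the Space's real serving setup is not part of the commit):

import requests

# POST a JSON body with a "prompt" key, matching data.get("prompt") above.
r = requests.post("http://localhost:8000/modelapi1", json={"prompt": "Who are you?"})
print(r.json())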