Update main.py
main.py CHANGED
@@ -1,8 +1,27 @@
 from wrapper import LLMWrapper
 import uvicorn
 from fastapi import FastAPI, Request
+from langchain_community.llms.ctransformers import CTransformers


+
+MODEL_TYPE = 'mistral'
+MODEL_BIN_PATH = "mistral-7b-instruct-v0.1.Q3_K_S.gguf"
+MAX_NEW_TOKEN = 600
+TEMPRATURE = 0.01
+CONTEXT_LENGTH = 6000
+
+
+llm = CTransformers(
+    model=MODEL_BIN_PATH,
+    config={
+        'max_new_tokens': MAX_NEW_TOKEN,
+        'temperature': TEMPRATURE,
+        'context_length': CONTEXT_LENGTH
+    },
+    model_type=MODEL_TYPE
+)
+
 app = FastAPI()
 llm_wrapper = LLMWrapper()

@@ -15,9 +34,5 @@ async def generate_text(request: Request):
     if not prompt:
         return {'error': 'Prompt is required'}, 400

-    generated_text =
+    generated_text = llm(prompt)
     return {'generated_text': generated_text}
-
-
-if __name__ == '__main__':
-    uvicorn.run(app, host='127.0.0.1', port=8001)
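For context, this commit loads a quantized Mistral GGUF model through the CTransformers wrapper in langchain_community and calls it directly in the generate_text handler. A quick way to smoke-test the updated endpoint is a plain HTTP request; the sketch below is an assumption-laden example, not part of the commit: the /generate path and the {"prompt": ...} body shape are not visible in this hunk, and the host/port simply reuse the values from the removed uvicorn.run line for a local run.

# Hypothetical smoke test for the updated endpoint. The /generate path,
# the JSON body shape, and the host/port are assumptions -- only the
# handler name and the prompt check are visible in the diff above.
import requests

resp = requests.post(
    "http://127.0.0.1:8001/generate",  # host/port taken from the removed uvicorn.run line
    json={"prompt": "Summarize GGUF quantization in one sentence."},
)
print(resp.json()["generated_text"])

Note that generated_text = llm(prompt) uses LangChain's legacy __call__ interface; on recent LangChain releases this emits a deprecation warning, and llm.invoke(prompt) is the equivalent current spelling.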
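One operational detail the diff leaves implicit: MODEL_BIN_PATH is a bare filename, so the GGUF file must already sit in the app's working directory. The sketch below shows one way to fetch it; the repo_id is an assumption (TheBloke's GGUF conversion is a common source of this exact filename), since the commit does not say where the file comes from.

# Sketch only: repo_id is an assumed source repo; the filename matches
# the MODEL_BIN_PATH constant in main.py.
from huggingface_hub import hf_hub_download

model_path = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",  # assumption, adjust as needed
    filename="mistral-7b-instruct-v0.1.Q3_K_S.gguf",
    local_dir=".",  # place the weights next to main.py
)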