Update app/llm.py
app/llm.py  +3 -3  CHANGED
@@ -103,7 +103,7 @@ async def generate(gen:GenModel):#, user: schemas.BaseUser = fastapi.Depends(cur
     gen.temperature = 0.5
     gen.seed = 42
     try:
-        st = time()
+        #st = time()
         output = llm_generate.create_completion(
             #messages=[
             #    {"role": "system", "content": gen.system},
@@ -125,8 +125,8 @@ async def generate(gen:GenModel):#, user: schemas.BaseUser = fastapi.Depends(cur
             print(delta['content'], end='')
             #print(chunk)
 
-        et = time()
-        output["time"] = et - st
+        #et = time()
+        #output["time"] = et - st
         print(output)
     except Exception as e:
         logger.error(f"Error in /generate endpoint: {e}")
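For context, the lines commented out here implemented simple wall-clock timing of the streamed completion and attached the elapsed seconds to the response dict. Below is a minimal, self-contained sketch of that pattern; the `from time import time` import is an assumption based on the bare `time()` calls in the diff, and `fake_stream` plus its chunk shape are hypothetical stand-ins for `llm_generate.create_completion(..., stream=True)`.

# sketch.py - timing pattern the commit disables (assumptions noted above)
from time import time

def fake_stream():
    # Hypothetical stand-in for llm_generate.create_completion(..., stream=True)
    for token in ("Hello", ", ", "world"):
        yield {"choices": [{"delta": {"content": token}}]}

st = time()                      # wall-clock start before the completion call
output = {"text": ""}
for chunk in fake_stream():
    delta = chunk["choices"][0]["delta"]
    output["text"] += delta["content"]
    print(delta["content"], end="")
et = time()                      # wall-clock end after streaming finishes
output["time"] = et - st         # elapsed seconds attached to the response dict
print()
print(output)

With the timing lines commented out, the endpoint still streams and prints the output, but the response no longer carries a "time" field.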