Update app.py
app.py
CHANGED
@@ -37,12 +37,12 @@ def generate_chat_response(request, llm):
             temperature=request.temperature
         )
         reply = response['choices'][0]['message']['content']
-        return reply
+        return {"response": reply, "literal": user_input}
     except Exception as e:
-        return f"Error: {str(e)}"
+        return {"response": f"Error: {str(e)}", "literal": user_input}
 
 def select_best_response(responses, request):
-    coherent_responses = filter_by_coherence(responses, request)
+    coherent_responses = filter_by_coherence([resp['response'] for resp in responses], request)
     best_response = filter_by_similarity(coherent_responses)
     return best_response
 
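Only the tail of generate_chat_response appears in this hunk. For context, a minimal sketch of how the whole function might read after the change, assuming llama-cpp-python-style Llama objects (create_chat_completion returns an OpenAI-shaped dict, which matches the response['choices'][0]['message']['content'] access above) and assuming user_input comes from a request field whose name is hypothetical:

def generate_chat_response(request, llm):
    # user_input is referenced by the diff but defined outside the hunk;
    # here it is assumed to come from the request body (field name is a guess).
    user_input = request.message
    try:
        # llama-cpp-python's create_chat_completion returns an OpenAI-style
        # dict: {'choices': [{'message': {'content': ...}}]}
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            temperature=request.temperature
        )
        reply = response['choices'][0]['message']['content']
        # Success and failure now share one shape: the model text plus the
        # literal input, so callers can pair outputs with prompts.
        return {"response": reply, "literal": user_input}
    except Exception as e:
        # Errors are returned as values, not raised; the endpoint scans for
        # the "Error" prefix and converts them to HTTP 500 itself.
        return {"response": f"Error: {str(e)}", "literal": user_input}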
@@ -62,7 +62,6 @@ def filter_by_similarity(responses):
 
 @app.post("/generate_chat")
 async def generate_chat(request: ChatRequest):
-    # Run in a ThreadPoolExecutor with no explicit limit on workers
     with ThreadPoolExecutor(max_workers=None) as executor:
         futures = [executor.submit(generate_chat_response, request, llm) for llm in llms]
         responses = []
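Dropping the comment changes nothing at runtime: with max_workers=None, ThreadPoolExecutor defaults to min(32, os.cpu_count() + 4) worker threads. The loop header that consumes the futures sits outside the hunk; a self-contained sketch of the fan-out pattern, using as_completed and stub callables in place of the real llms:

from concurrent.futures import ThreadPoolExecutor, as_completed

# Stub "models" standing in for the real llms list.
def make_stub(name):
    def stub(request):
        return {"response": f"{name} answer", "literal": request["message"]}
    return stub

llms = [make_stub("model_a"), make_stub("model_b"), make_stub("model_c")]
request = {"message": "Hello"}

# Fan out: the same request is submitted to every model, one thread each.
with ThreadPoolExecutor(max_workers=None) as executor:
    futures = [executor.submit(llm, request) for llm in llms]
    # Collect in completion order. future.result() re-raises any exception
    # from the worker, which is why generate_chat_response catches
    # internally and returns errors as values instead.
    responses = [future.result() for future in as_completed(futures)]

print([r["response"] for r in responses])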
@@ -70,12 +69,17 @@ async def generate_chat(request: ChatRequest):
             response = future.result()
             responses.append(response)
 
-    if any("Error" in response for response in responses):
-        error_response = next(response for response in responses if "Error" in response)
-        raise HTTPException(status_code=500, detail=error_response)
+    if any("Error" in response['response'] for response in responses):
+        error_response = next(response for response in responses if "Error" in response['response'])
+        raise HTTPException(status_code=500, detail=error_response['response'])
 
-    best_response = select_best_response(responses, request)
-
+    best_response = select_best_response([resp['response'] for resp in responses], request)
+
+    return {
+        "best_response": best_response,
+        "all_responses": [resp['response'] for resp in responses],
+        "literal_inputs": [resp['literal'] for resp in responses]
+    }
 
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
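One wrinkle worth noting: the endpoint now passes already-unwrapped strings into select_best_response, whose body (line 45 above) indexes resp['response'] again, so one of the two unwraps is redundant as committed. Neither filter_by_coherence nor filter_by_similarity appears in the diff; a minimal stand-in for the similarity step over plain strings, using difflib rather than whatever metric app.py actually defines:

from difflib import SequenceMatcher

def filter_by_similarity(responses):
    # Consensus pick: score each candidate by its mean similarity to the
    # others and keep the most "central" one. This is a stand-in; the real
    # metric in app.py is not shown in the diff.
    def mean_similarity(candidate):
        others = [r for r in responses if r is not candidate]
        if not others:
            return 1.0
        return sum(SequenceMatcher(None, candidate, o).ratio() for o in others) / len(others)
    return max(responses, key=mean_similarity)

print(filter_by_similarity(["Paris is the capital.", "The capital is Paris.", "I cannot answer."]))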
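With the change applied, one request returns the selected answer together with every model's raw output and the echoed inputs. A hypothetical call against the uvicorn server (every ChatRequest field name except temperature is a guess, since the model class is not in the diff):

curl -X POST http://localhost:7860/generate_chat \
  -H "Content-Type: application/json" \
  -d '{"message": "What is the capital of France?", "temperature": 0.7}'

The JSON body carries the three keys added in the final hunk: best_response, all_responses, and literal_inputs.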