Echo-ai committed on
Commit 02a645a · verified · 1 Parent(s): 81b5b89

Create app.py

Files changed (1)
  1. app.py +188 -0
app.py ADDED
@@ -0,0 +1,188 @@
+from flask import Flask, render_template, request, jsonify, Response
+from duckduckgo_search import DDGS
+import json
+import time
+import sys
+import argparse
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.INFO,
+                    format='%(asctime)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)
+
+app = Flask(__name__)
+
+class DDGChatAPI:
+    @staticmethod
+    def respond_json(response, key="message"):
+        """Create an OpenAI-compatible response format."""
+        return {
+            "id": f"chatcmpl-{int(time.time())}",
+            "object": "chat.completion",
+            "created": int(time.time()),
+            "choices": [{
+                "index": 0,
+                key: response,
+                "finish_reason": "stop"
+            }],
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0
+            }
+        }
+
+    @staticmethod
+    def validate_messages(messages):
+        """Validate message format."""
+        if not isinstance(messages, list):
+            return {
+                "error": {
+                    "message": "'messages' must be a list",
+                    "code": "invalid_message_list"
+                }
+            }, 400
+
+        for message in messages:
+            if not isinstance(message, dict) or 'role' not in message or 'content' not in message:
+                return {
+                    "error": {
+                        "message": "Each message must have a 'role' and a 'content'",
+                        "code": "invalid_message"
+                    }
+                }, 400
+
+        return None, None
+
+    @staticmethod
+    def wait_for_full_response(last_message, model='gpt-4o-mini', timeout=120):
+        """
+        Wait for a complete response from DuckDuckGo chat
+
+        Args:
+            last_message (str): User's message
+            model (str): Selected AI model
+            timeout (int): Maximum wait time in seconds
+
+        Returns:
+            str: Full AI response
+        """
+        start_time = time.time()
+        ddgs = DDGS()
+
+        attempts = 0
+        max_attempts = 5
+
+        while attempts < max_attempts:
+            try:
+                # Attempt to get response
+                response = ddgs.chat(last_message, model=model)
+
+                # Clean and validate response
+                cleaned_response = response.strip()
+
+                # Check response quality
+                if len(cleaned_response) >= 50:
+                    logger.info(f"Successfully generated response in {attempts + 1} attempt(s)")
+                    return cleaned_response
+
+                # If response is too short, wait and retry
+                attempts += 1
+                time.sleep(2)  # Wait between attempts
+
+                # Break if total timeout is exceeded
+                if time.time() - start_time > timeout:
+                    break
+
+            except Exception as e:
+                logger.error(f"Attempt {attempts + 1} failed: {str(e)}")
+                attempts += 1
+                time.sleep(2)
+
+        # Fallback message if no good response generated
+        return "I apologize, but I'm unable to generate a complete response at the moment. Please try again later."
+
+@app.route("/")
+def homepage():
+    return render_template("index.html")
+
+@app.route("/v1/chat/completions", methods=["POST"])
+def chat_completions():
+    # Get request data
+    data = request.json
+    messages = data.get("messages", [])
+    model = data.get("model", "gpt-4o-mini")
+    stream = data.get("stream", False)
+    timeout = data.get("timeout", 120)
+
+    # Validate messages
+    errors, status = DDGChatAPI.validate_messages(messages)
+    if errors:
+        return jsonify(errors), status
+
+    # Extract the last message content
+    last_message = messages[-1]["content"]
+
+    # Log incoming request
+    logger.info(f"Received chat request: model={model}, message={last_message[:100]}...")
+
+    try:
+        # Get full response
+        response = DDGChatAPI.wait_for_full_response(last_message, model, timeout)
+
+        # Log response generation
+        logger.info(f"Response generated (length: {len(response)} chars)")
+
+        # Handle streaming response
+        if stream:
+            def generate():
+                # Split response into chunks for streaming
+                max_chunk_size = 50
+                chunks = [response[i:i+max_chunk_size] for i in range(0, len(response), max_chunk_size)]
+
+                for chunk in chunks:
+                    delta_response = {
+                        "role": "assistant",
+                        "content": chunk
+                    }
+                    yield f"data: {json.dumps(DDGChatAPI.respond_json(delta_response, 'delta'))}\n\n"
+
+                yield "data: [DONE]\n\n"
+
+            return Response(generate(), mimetype='text/event-stream')
+
+        # Regular JSON response
+        response_data = {
+            "role": "assistant",
+            "content": response
+        }
+        return jsonify(DDGChatAPI.respond_json(response_data))
+
+    except Exception as e:
+        logger.error(f"Error processing request: {str(e)}")
+        return jsonify({
+            "error": {
+                "message": str(e),
+                "code": "ddg_chat_error"
+            }
+        }), 500
+
+@app.route("/v1/models", methods=["GET"])
+def list_models():
+    """Provide a list of available models."""
+    return jsonify({
+        "data": [
+            {"id": "gpt-4o-mini"},
+            {"id": "claude-3-haiku"},
+            {"id": "llama-3.1-70b"},
+            {"id": "mixtral-8x7b"}
+        ],
+        "object": "list"
+    })
+
+def main():
+    app.run(host='0.0.0.0', port=7860)
+
+if __name__ == "__main__":
+    main()
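
For reference, a minimal client sketch (not part of this commit) that exercises the /v1/chat/completions endpoint added above. It assumes the app is running locally on port 7860 as configured in main(); the use of the requests package and the example prompt are illustrative assumptions, not part of the repository.

# Minimal client sketch (assumption: the app.py server is running on localhost:7860)
import requests

payload = {
    "model": "gpt-4o-mini",  # any id returned by GET /v1/models
    "messages": [
        {"role": "user", "content": "Say hello in one sentence."}  # example prompt (illustrative)
    ],
    "stream": False  # set to True to consume the text/event-stream response instead
}

resp = requests.post("http://localhost:7860/v1/chat/completions", json=payload, timeout=180)
resp.raise_for_status()

# Non-streaming responses place the assistant message under choices[0]["message"]
print(resp.json()["choices"][0]["message"]["content"])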