nananie143 committed (verified)
Commit 3f46926 · Parent: 9305bd0

Create app.py

Files changed (1): app.py (+331, -0)
app.py ADDED
@@ -0,0 +1,331 @@
+ import gradio as gr
+ import torch
+ import time
+ import uuid
+ import uvicorn
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from typing import List, Dict, Optional
+ from collections import defaultdict
+ from queue import PriorityQueue
+
+ # Load the model and tokenizer
+ MODEL_NAME = "unit-mesh/autodev-coder-deepseek-6.7b-finetunes"
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
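+ # Note (approximate): a 6.7B-parameter model in float16 needs on the order of
+ # 13-14 GB for the weights alone; device_map="auto" relies on the `accelerate`
+ # package and may offload layers to CPU when GPU memory is insufficient.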
+
+ # Custom CSS for OpenWebUI-like design
+ custom_css = """
+ #chatbot {
+     font-family: Arial, sans-serif;
+     max-width: 800px;
+     margin: auto;
+     padding: 20px;
+     border-radius: 10px;
+     box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+ }
+
+ #sidebar {
+     background-color: #f5f5f5;
+     padding: 20px;
+     border-radius: 10px;
+ }
+
+ .message.user {
+     background-color: #007bff;
+     color: white;
+     border-radius: 10px 10px 0 10px;
+     padding: 10px;
+     margin: 5px 0;
+     max-width: 70%;
+     margin-left: auto;
+ }
+
+ .message.bot {
+     background-color: #e9ecef;
+     color: black;
+     border-radius: 10px 10px 10px 0;
+     padding: 10px;
+     margin: 5px 0;
+     max-width: 70%;
+     margin-right: auto;
+ }
+
+ .dark-mode #chatbot {
+     background-color: #2d2d2d;
+     color: #ffffff;
+ }
+
+ .dark-mode #sidebar {
+     background-color: #1e1e1e;
+     color: #ffffff;
+ }
+
+ .dark-mode .message.user {
+     background-color: #0056b3;
+ }
+
+ .dark-mode .message.bot {
+     background-color: #3d3d3d;
+     color: #ffffff;
+ }
+ """
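+ # The .dark-mode rules above only take effect when a "dark-mode" class exists
+ # on an ancestor element; the theme toggle below adds/removes it on <body>.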
+
+ # Enhanced Reasoning Algorithms
+ class DeductiveReasoner:
+     def __init__(self, rules: Dict[str, str]):
+         self.rules = rules
+
+     def infer(self, premise: str, specific_case: str) -> str:
+         for condition, conclusion in self.rules.items():
+             if condition in specific_case:
+                 return f"Given the premise '{premise}' and the specific case '{specific_case}', the conclusion is: {conclusion}"
+         return f"Given the premise '{premise}', no applicable rule was found for the specific case '{specific_case}'."
+
+
+ class InductiveReasoner:
+     def __init__(self):
+         self.patterns = defaultdict(int)
+
+     def learn(self, examples: List[str]):
+         # Count adjacent word pairs (bigrams) across all examples
+         for example in examples:
+             words = example.split()
+             for i in range(len(words) - 1):
+                 self.patterns[(words[i], words[i + 1])] += 1
+
+     def infer(self) -> str:
+         if not self.patterns:
+             return "No patterns have been learned yet."
+         most_common_pattern = max(self.patterns, key=self.patterns.get)
+         return f"From the learned examples, the most common pattern is: '{most_common_pattern[0]} {most_common_pattern[1]}'."
+
+
+ class AbductiveReasoner:
+     def __init__(self, hypotheses: Dict[str, float]):
+         self.hypotheses = hypotheses
+
+     def evaluate(self, observation: str, likelihoods: Dict[str, float]) -> str:
+         # Score each hypothesis by prior * likelihood (an unnormalized posterior)
+         posterior = {
+             hypothesis: prior * likelihoods.get(hypothesis, 0.0)
+             for hypothesis, prior in self.hypotheses.items()
+         }
+         best_hypothesis = max(posterior, key=posterior.get)
+         return f"Given the observation '{observation}', the most plausible explanation is: {best_hypothesis} (unnormalized posterior score: {posterior[best_hypothesis]:.2f})."
+
+
+ class BayesianReasoner:
+     def __init__(self, prior: float):
+         self.prior = prior
+
+     def update(self, evidence: str, likelihood: float) -> str:
+         # Simplified multiplicative update; a full Bayes update would also
+         # divide by the marginal probability of the evidence
+         posterior = self.prior * likelihood
+         self.prior = posterior  # Carry the posterior forward as the new prior
+         return f"Given the evidence '{evidence}', the updated probability is: {posterior:.2f}."
+
+
+ class HeuristicSearcher:
+     def __init__(self, heuristic_func):
+         self.heuristic_func = heuristic_func
+
+     def search(self, start, goal):
+         # A*-style best-first search with uniform step costs
+         frontier = PriorityQueue()
+         frontier.put((0, start))
+         came_from = {start: None}
+         cost_so_far = {start: 0}
+
+         while not frontier.empty():
+             _, current = frontier.get()
+
+             if current == goal:
+                 break
+
+             for next_state in self.get_neighbors(current):
+                 new_cost = cost_so_far[current] + 1  # Assume uniform cost
+                 if next_state not in cost_so_far or new_cost < cost_so_far[next_state]:
+                     cost_so_far[next_state] = new_cost
+                     priority = new_cost + self.heuristic_func(next_state, goal)
+                     frontier.put((priority, next_state))
+                     came_from[next_state] = current
+
+         return f"Best solution found from {start} to {goal}."
+
+     def get_neighbors(self, state):
+         # Example neighborhood: for an integer state, step one unit either way
+         return [state - 1, state + 1]
+
+
+ # Initialize reasoning algorithms
+ deductive_reasoner = DeductiveReasoner(
+     rules={
+         "error": "Check for syntax errors in the code.",
+         "loop": "Optimize the loop structure for better performance.",
+         "null": "Ensure proper null checks are in place.",
+     }
+ )
+
+ inductive_reasoner = InductiveReasoner()
+ inductive_reasoner.learn(["If it rains, the ground gets wet.", "If you study, you pass the exam."])
+
+ abductive_reasoner = AbductiveReasoner(
+     hypotheses={"syntax error": 0.3, "logical error": 0.5, "runtime error": 0.2}
+ )
+
+ bayesian_reasoner = BayesianReasoner(prior=0.5)
+
+ heuristic_searcher = HeuristicSearcher(heuristic_func=lambda state, goal: abs(state - goal))
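+
+ # Illustrative outputs (a sketch; exact strings depend on the inputs above):
+ #   deductive_reasoner.infer("General rule", "null pointer crash")
+ #       -> "... the conclusion is: Ensure proper null checks are in place."
+ #   inductive_reasoner.infer()
+ #       -> reports the most frequent learned bigram, e.g. "'If it'"
+ #   heuristic_searcher.search(start=0, goal=10)
+ #       -> "Best solution found from 0 to 10."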
+
+
+ # Chatbot function with reasoning enhancements
+ def chatbot_response(message, history, reasoning_algorithm, file_content=None):
+     history = history or []
+     # Dispatch through lambdas so only the selected reasoner runs (the
+     # Bayesian update mutates state and should not fire as a side effect)
+     dispatch = {
+         "Deductive": lambda: deductive_reasoner.infer("General rule", message),
+         "Inductive": lambda: inductive_reasoner.infer(),
+         "Abductive": lambda: abductive_reasoner.evaluate(message, {"syntax error": 0.8, "logical error": 0.5}),
+         "Bayesian": lambda: bayesian_reasoner.update(message, likelihood=0.7),
+         "Heuristic": lambda: heuristic_searcher.search(start=0, goal=10),
+     }
+     handler = dispatch.get(reasoning_algorithm)
+     reasoning = handler() if handler else "Invalid reasoning algorithm."
+
+     # Append file content if provided (gr.File passes a file reference,
+     # not text, so read it via the upload handler below)
+     if file_content:
+         file_text = handle_file_upload(file_content)
+         if file_text:
+             reasoning += f"\n\nFile Content:\n{file_text}"
+
+     history.append((message, reasoning))
+     return history, history
+
+
+ # File upload handler
+ def handle_file_upload(file):
+     if file is None:
+         return None
+     # Depending on the Gradio version, gr.File yields a filepath string or a
+     # tempfile wrapper with a .name attribute
+     path = file if isinstance(file, str) else file.name
+     with open(path, "r") as f:
+         return f.read()
+
+
+ # Theme toggling: gr.update() has no `css` parameter, so the dark-mode styles
+ # are applied client-side by toggling a "dark-mode" class on <body>. This
+ # assumes a recent Gradio 4.x release where event listeners accept `js=`.
+ toggle_theme_js = """
+ (theme) => {
+     document.body.classList.toggle('dark-mode', theme === 'Dark');
+ }
+ """
+
+ # Gradio interface
+ with gr.Blocks(css=custom_css) as demo:
+     gr.Markdown("# OpenWebUI-like Chat Interface with Reasoning Enhancements")
+     with gr.Row():
+         with gr.Column(scale=1, elem_id="sidebar"):
+             gr.Markdown("### Settings")
+             # Placeholder dropdown; not wired to the backend yet
+             model_selector = gr.Dropdown(["Model 1", "Model 2"], label="Select Model")
+             reasoning_selector = gr.Dropdown(
+                 ["Deductive", "Inductive", "Abductive", "Bayesian", "Heuristic"],
+                 label="Select Reasoning Algorithm",
+                 value="Deductive",
+             )
+             theme_selector = gr.Radio(["Light", "Dark"], label="Theme", value="Light")
+             file_upload = gr.File(label="Upload File")
+         with gr.Column(scale=3, elem_id="chatbot"):
+             chatbot = gr.Chatbot(label="Chat")
+             message = gr.Textbox(label="Your Message", placeholder="Type your message here...")
+             submit = gr.Button("Send")
+     state = gr.State()
+
+     # Chat interaction
+     submit.click(
+         chatbot_response,
+         inputs=[message, state, reasoning_selector, file_upload],
+         outputs=[chatbot, state],
+     )
+
+     # File upload handling: preview the file's text in the message box
+     file_upload.change(
+         handle_file_upload,
+         inputs=file_upload,
+         outputs=message,
+     )
+
+     # Theme toggling (runs entirely client-side via the JS snippet above)
+     theme_selector.change(
+         None,
+         inputs=theme_selector,
+         js=toggle_theme_js,
+     )
+
+
+ # OpenAI-compatible API using FastAPI
+ app = FastAPI()
+
+ class ChatCompletionRequest(BaseModel):
+     model: str
+     messages: List[dict]
+     max_tokens: Optional[int] = 500
+     temperature: Optional[float] = 0.7
+
+ class ChatCompletionResponse(BaseModel):
+     id: str
+     object: str = "chat.completion"
+     created: int
+     model: str
+     choices: List[dict]
+     usage: dict
+
+ @app.post("/v1/chat/completions")
+ async def chat_completions(request: ChatCompletionRequest):
+     try:
+         # Extract the last user message
+         user_message = request.messages[-1]["content"]
+
+         # Generate a response; max_new_tokens bounds only the completion
+         # (max_length would also count the prompt), and sampling must be
+         # enabled for temperature to have any effect
+         inputs = tokenizer(user_message, return_tensors="pt").to(model.device)
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=request.max_tokens,
+             temperature=request.temperature,
+             do_sample=True,
+         )
+         # Decode only the newly generated tokens, not the echoed prompt
+         response_text = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+
+         # Format the response in OpenAI-compatible format
+         response = ChatCompletionResponse(
+             id=f"chatcmpl-{uuid.uuid4().hex}",
+             created=int(time.time()),
+             model=request.model,
+             choices=[
+                 {
+                     "message": {
+                         "role": "assistant",
+                         "content": response_text,
+                     },
+                     "finish_reason": "stop",
+                     "index": 0,
+                 }
+             ],
+             usage={
+                 "prompt_tokens": len(inputs["input_ids"][0]),
+                 "completion_tokens": len(outputs[0]) - len(inputs["input_ids"][0]),
+                 "total_tokens": len(outputs[0]),
+             },
+         )
+         return response
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
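+
+ # Example request against the endpoint (a sketch; assumes the server is
+ # running locally on port 8000 and the `requests` package is installed;
+ # the model name is arbitrary since the endpoint simply echoes it back):
+ #
+ #   import requests
+ #   r = requests.post(
+ #       "http://localhost:8000/v1/chat/completions",
+ #       json={"model": "autodev-coder", "messages": [{"role": "user", "content": "Write a Python hello world."}]},
+ #   )
+ #   print(r.json()["choices"][0]["message"]["content"])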
+
+
+ # Run the FastAPI server
+ def run_api():
+     uvicorn.run(app, host="0.0.0.0", port=8000)
+
+
+ # Run the Gradio app
+ def run_gradio():
+     demo.launch(server_name="0.0.0.0", server_port=7860)
+
+
+ # Entry point
+ if __name__ == "__main__":
+     import threading
+
+     # Start the FastAPI server in a background (daemon) thread so the process
+     # can exit cleanly when the Gradio app shuts down
+     api_thread = threading.Thread(target=run_api, daemon=True)
+     api_thread.start()
+
+     # Start the Gradio app (blocks in the main thread)
+     run_gradio()
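+
+ # Usage: `python app.py` serves the Gradio UI on port 7860 and the
+ # OpenAI-compatible API on port 8000.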