import time
import uuid

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
from typing import List, Dict, Optional
from collections import defaultdict
from queue import PriorityQueue

# Load the model and tokenizer
MODEL_NAME = "unit-mesh/autodev-coder-deepseek-6.7b-finetunes"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")

# Custom CSS for OpenWebUI-like design
custom_css = """
#chatbot {
    font-family: Arial, sans-serif;
    max-width: 800px;
    margin: auto;
    padding: 20px;
    border-radius: 10px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}

#sidebar {
    background-color: #f5f5f5;
    padding: 20px;
    border-radius: 10px;
}

.message.user {
    background-color: #007bff;
    color: white;
    border-radius: 10px 10px 0 10px;
    padding: 10px;
    margin: 5px 0;
    max-width: 70%;
    margin-left: auto;
}

.message.bot {
    background-color: #e9ecef;
    color: black;
    border-radius: 10px 10px 10px 0;
    padding: 10px;
    margin: 5px 0;
    max-width: 70%;
    margin-right: auto;
}

.dark-mode #chatbot {
    background-color: #2d2d2d;
    color: #ffffff;
}

.dark-mode #sidebar {
    background-color: #1e1e1e;
    color: #ffffff;
}

.dark-mode .message.user {
    background-color: #0056b3;
}

.dark-mode .message.bot {
    background-color: #3d3d3d;
    color: #ffffff;
}
"""

# Enhanced Reasoning Algorithms
class DeductiveReasoner:
    def __init__(self, rules: Dict[str, str]):
        self.rules = rules

    def infer(self, premise: str, specific_case: str) -> str:
        for condition, conclusion in self.rules.items():
            if condition in specific_case:
                return f"Given the premise '{premise}' and the specific case '{specific_case}', the conclusion is: {conclusion}"
        return f"Given the premise '{premise}', no applicable rule was found for the specific case '{specific_case}'."

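# Usage sketch (hypothetical rule set; matching is a simple substring lookup):
#   DeductiveReasoner({"rain": "take an umbrella"}).infer(
#       "weather heuristics", "rain is forecast today")
#   -> "... the conclusion is: take an umbrella"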

class InductiveReasoner:
    def __init__(self):
        self.patterns = defaultdict(int)

    def learn(self, examples: List[str]):
        for example in examples:
            words = example.split()
            for i in range(len(words) - 1):
                self.patterns[(words[i], words[i + 1])] += 1

    def infer(self) -> str:
        if not self.patterns:
            return "No patterns have been learned yet."
        most_common_pattern = max(self.patterns, key=self.patterns.get)
        return f"From the learned examples, the most common pattern is: '{most_common_pattern[0]} {most_common_pattern[1]}'."

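# Example: learn(["the cat sat", "the cat ran"]) counts the bigram
# ("the", "cat") twice, so infer() reports "the cat" as the most common
# pattern; ties are broken arbitrarily by max().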

class AbductiveReasoner:
    def __init__(self, hypotheses: Dict[str, float]):
        self.hypotheses = hypotheses

    def evaluate(self, observation: str, likelihoods: Dict[str, float]) -> str:
        # Weight each hypothesis by prior * likelihood, then normalize so the
        # reported value is a true posterior probability
        posterior = {
            hypothesis: prior * likelihoods.get(hypothesis, 0.0)
            for hypothesis, prior in self.hypotheses.items()
        }
        total = sum(posterior.values())
        if total == 0.0:
            return f"The observation '{observation}' is not explained by any known hypothesis."
        posterior = {h: p / total for h, p in posterior.items()}
        best_hypothesis = max(posterior, key=posterior.get)
        return f"Given the observation '{observation}', the most plausible explanation is: {best_hypothesis} (posterior probability: {posterior[best_hypothesis]:.2f})."

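# Example: with the priors configured below (0.3 / 0.5 / 0.2) and likelihoods
# {"syntax error": 0.8, "logical error": 0.5}, the weighted scores are
# 0.24, 0.25, 0.00; normalizing gives roughly 0.49 vs 0.51, so
# "logical error" is reported.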

class BayesianReasoner:
    def __init__(self, prior: float):
        self.prior = prior

    def update(self, evidence: str, likelihood: float) -> str:
        # Simplified, unnormalized update: the prior is only scaled by the
        # likelihood, so the result is a relative belief score rather than a
        # normalized probability
        posterior = self.prior * likelihood
        self.prior = posterior  # Carry the value forward for future updates
        return f"Given the evidence '{evidence}', the updated belief score is: {posterior:.2f}."

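# A full Bayes update would also need the likelihood of the evidence when the
# hypothesis is false (a sketch; `lik_if_false` is not part of the class above):
#   posterior = (prior * lik) / (prior * lik + (1 - prior) * lik_if_false)
# e.g. prior=0.5, lik=0.7, lik_if_false=0.3 gives 0.70, whereas the
# unnormalized product yields 0.35 and shrinks toward zero on every update.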

class HeuristicSearcher:
    def __init__(self, heuristic_func):
        self.heuristic_func = heuristic_func

    def search(self, start, goal):
        frontier = PriorityQueue()
        frontier.put((0, start))
        came_from = {}
        cost_so_far = {}
        came_from[start] = None
        cost_so_far[start] = 0

        while not frontier.empty():
            _, current = frontier.get()

            if current == goal:
                return f"Best solution found from {start} to {goal} (cost: {cost_so_far[current]})."

            for next_state in self.get_neighbors(current):
                new_cost = cost_so_far[current] + 1  # Assume uniform step cost
                if next_state not in cost_so_far or new_cost < cost_so_far[next_state]:
                    cost_so_far[next_state] = new_cost
                    priority = new_cost + self.heuristic_func(next_state, goal)
                    frontier.put((priority, next_state))
                    came_from[next_state] = current

        return f"No solution found from {start} to {goal}."

    def get_neighbors(self, state):
        # Example: For a numeric state, return neighboring states
        return [state - 1, state + 1]

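# Usage sketch: with the absolute-distance heuristic and the unit-step
# neighbors above, the search behaves like A* over the integers, e.g.
#   HeuristicSearcher(lambda s, g: abs(s - g)).search(start=0, goal=10)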

# Initialize reasoning algorithms
deductive_reasoner = DeductiveReasoner(
    rules={
        "error": "Check for syntax errors in the code.",
        "loop": "Optimize the loop structure for better performance.",
        "null": "Ensure proper null checks are in place.",
    }
)

inductive_reasoner = InductiveReasoner()
inductive_reasoner.learn(["If it rains, the ground gets wet.", "If you study, you pass the exam."])

abductive_reasoner = AbductiveReasoner(
    hypotheses={"syntax error": 0.3, "logical error": 0.5, "runtime error": 0.2}
)

bayesian_reasoner = BayesianReasoner(prior=0.5)

heuristic_searcher = HeuristicSearcher(heuristic_func=lambda state, goal: abs(state - goal))


# Chatbot function with reasoning enhancements
def chatbot_response(message, history, reasoning_algorithm, file_content=None):
    history = history or []
    # Dispatch lazily so only the selected reasoner runs; evaluating every
    # branch eagerly would, for example, mutate the Bayesian prior on every message
    reasoners = {
        "Deductive": lambda: deductive_reasoner.infer("General rule", message),
        "Inductive": lambda: inductive_reasoner.infer(),
        "Abductive": lambda: abductive_reasoner.evaluate(message, {"syntax error": 0.8, "logical error": 0.5}),
        "Bayesian": lambda: bayesian_reasoner.update(message, likelihood=0.7),
        "Heuristic": lambda: heuristic_searcher.search(start=0, goal=10),
    }
    reasoning = reasoners.get(reasoning_algorithm, lambda: "Invalid reasoning algorithm.")()

    # Append file content if provided (gr.File passes a path, not the text itself)
    if file_content:
        file_text = handle_file_upload(file_content)
        if file_text:
            reasoning += f"\n\nFile Content:\n{file_text}"

    history.append((message, reasoning))
    return history, history


# File upload handler; gr.File yields a path string (or a tempfile wrapper
# with a .name attribute, depending on the Gradio version)
def handle_file_upload(file):
    if file:
        path = file if isinstance(file, str) else file.name
        with open(path, "r") as f:
            return f.read()
    return None


# Theme toggling: Gradio cannot swap the page CSS at runtime via gr.update,
# so the dark theme is applied client-side by toggling a `dark-mode` class on
# <body> (see theme_selector.change below); the .dark-mode rules in
# custom_css then take effect.


# Gradio interface
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# OpenWebUI-like Chat Interface with Reasoning Enhancements")
    with gr.Row():
        with gr.Column(scale=1, elem_id="sidebar"):
            gr.Markdown("### Settings")
            # Decorative selector; not wired to the backend model
            model_selector = gr.Dropdown(["Model 1", "Model 2"], label="Select Model")
            reasoning_selector = gr.Dropdown(
                ["Deductive", "Inductive", "Abductive", "Bayesian", "Heuristic"],
                label="Select Reasoning Algorithm",
                value="Deductive",
            )
            theme_selector = gr.Radio(["Light", "Dark"], label="Theme", value="Light")
            file_upload = gr.File(label="Upload File")
        with gr.Column(scale=3, elem_id="chatbot"):
            chatbot = gr.Chatbot(label="Chat")
            message = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            submit = gr.Button("Send")
            state = gr.State()

    # Chat interaction
    submit.click(
        chatbot_response,
        inputs=[message, state, reasoning_selector, file_upload],
        outputs=[chatbot, state],
    )

    # File upload handling
    file_upload.change(
        handle_file_upload,
        inputs=file_upload,
        outputs=message,
    )

    # Theme toggling: pure client-side JS callback (assumes Gradio 4's `js`
    # event argument; older versions used `_js`)
    theme_selector.change(
        None,
        inputs=theme_selector,
        outputs=None,
        js="(theme) => { document.body.classList.toggle('dark-mode', theme === 'Dark'); }",
    )


# OpenAI-compatible API using FastAPI
app = FastAPI()

class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[dict]
    max_tokens: Optional[int] = 500
    temperature: Optional[float] = 0.7

class ChatCompletionResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[dict]
    usage: dict

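# Example request against the endpoint below (assumes the server is running
# locally on port 8000; the model name is echoed back, not validated):
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "autodev-coder", "messages": [{"role": "user", "content": "Hello"}]}'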
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    try:
        # Extract the last user message
        user_message = request.messages[-1]["content"]

        # Generate a response using the model; max_new_tokens bounds only the
        # completion, and do_sample=True makes the temperature take effect
        inputs = tokenizer(user_message, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,
            max_new_tokens=request.max_tokens,
            temperature=request.temperature,
            do_sample=True,
        )
        # Decode only the newly generated tokens, not the echoed prompt
        prompt_tokens = inputs["input_ids"].shape[1]
        response_text = tokenizer.decode(outputs[0][prompt_tokens:], skip_special_tokens=True)
        completion_tokens = outputs[0].shape[0] - prompt_tokens

        # Format the response in OpenAI-compatible format
        response = ChatCompletionResponse(
            id=f"chatcmpl-{uuid.uuid4().hex}",
            created=int(time.time()),
            model=request.model,
            choices=[
                {
                    "message": {
                        "role": "assistant",
                        "content": response_text,
                    },
                    "finish_reason": "stop",
                    "index": 0,
                }
            ],
            usage={
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
        )
        return response
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

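# The endpoint also works with the official OpenAI client (a sketch, assuming
# the `openai` package >= 1.0 is installed; the api_key value is unused here):
#   from openai import OpenAI
#   client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")
#   reply = client.chat.completions.create(
#       model="autodev-coder",
#       messages=[{"role": "user", "content": "Hello"}],
#   )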

# Run the FastAPI server
def run_api():
    uvicorn.run(app, host="0.0.0.0", port=8000)


# Run the Gradio app
def run_gradio():
    demo.launch(server_name="0.0.0.0", server_port=7860)


# Entry point
if __name__ == "__main__":
    import threading

    # Start the FastAPI server in a background daemon thread so the process
    # exits cleanly when the Gradio app is stopped
    api_thread = threading.Thread(target=run_api, daemon=True)
    api_thread.start()

    # Start the Gradio app
    run_gradio()