Leonydis137 committed
Commit 58a4c8c · verified · 1 Parent(s): 3f833e1

Update app.py

Files changed (1)
  1. app.py +97 -1
app.py CHANGED
@@ -4,6 +4,101 @@ import time
import logging
from typing import List, Dict, Any, Optional
import random
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import json
+from pathlib import Path
+from datetime import datetime
+
+MODEL_NAME = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to("cpu").eval()
+
+CHANGELOG_FILE = Path("changelog.json")
+CHATLOG_FILE = Path("chatlog.json")
+
+def generate_reply(user_input, chat_history, temperature, max_tokens):
+    prompt = "You are a helpful AI assistant.\n"
+    for user_msg, bot_msg in chat_history:
+        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
+    prompt += f"User: {user_input}\nAssistant:"
+
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+    outputs = model.generate(
+        **inputs,
+        max_new_tokens=max_tokens,
+        temperature=temperature,
+        do_sample=True,
+        top_p=0.9,
+        pad_token_id=tokenizer.eos_token_id or tokenizer.pad_token_id,
+        eos_token_id=tokenizer.eos_token_id,
+    )
+    reply = tokenizer.decode(outputs[0][inputs['input_ids'].shape[-1]:], skip_special_tokens=True).strip()
+    chat_history.append((user_input, reply))
+
+    # Save chat log after each exchange
+    save_chat_log(chat_history)
+    return "", chat_history
+
+def save_chat_log(chat_history):
+    data = {
+        "timestamp": datetime.now().isoformat(),
+        "chat": chat_history,
+    }
+    if CHATLOG_FILE.exists():
+        old_data = json.loads(CHATLOG_FILE.read_text())
+    else:
+        old_data = []
+    old_data.append(data)
+    CHATLOG_FILE.write_text(json.dumps(old_data, indent=2))
+
+def generate_suggestions(feedback):
+    # Simple mock suggestions based on feedback input
+    # Replace with real model inference if desired
+    suggestions = [
+        f"Improve clarity on: {feedback[:50]}...",
+        "Add context awareness for better follow-up answers.",
+        "Enhance error handling for invalid inputs.",
+    ]
+    # Save suggestions to changelog file
+    save_changelog(suggestions)
+    return "\n- " + "\n- ".join(suggestions)
+
+def save_changelog(suggestions):
+    data = {
+        "timestamp": datetime.now().isoformat(),
+        "suggestions": suggestions,
+    }
+    if CHANGELOG_FILE.exists():
+        old_data = json.loads(CHANGELOG_FILE.read_text())
+    else:
+        old_data = []
+    old_data.append(data)
+    CHANGELOG_FILE.write_text(json.dumps(old_data, indent=2))
+
+with gr.Blocks(title="Autonomous AI Minimal with Self-Improve") as demo:
+    gr.Markdown("# 🤖 Autonomous AI with Feedback & Logging")
+
+    with gr.Tab("Chat"):
+        chatbot = gr.Chatbot(label="Chat")
+        user_input = gr.Textbox(placeholder="Type your message here...", lines=2, label="Your Message")
+        temperature = gr.Slider(0.1, 1.2, value=0.7, step=0.05, label="Temperature")
+        max_tokens = gr.Slider(50, 300, value=150, step=25, label="Max Tokens")
+        send_btn = gr.Button("Send")
+        reset_btn = gr.Button("Reset")
+
+        state = gr.State([])
+
+        send_btn.click(generate_reply, inputs=[user_input, state, temperature, max_tokens], outputs=[user_input, chatbot])
+        reset_btn.click(lambda: [], None, chatbot)
+        reset_btn.click(lambda: [], None, state)
+
+    with gr.Tab("Self-Improve"):
+        feedback = gr.Textbox(label="Enter feedback or observations", lines=3)
+        suggest_btn = gr.Button("Generate Suggestions")
+        suggestions = gr.Textbox(label="Improvement Suggestions", interactive=False, lines=6)
+
+        suggest_btn.click(generate_suggestions, inputs=feedback, outputs=suggestions)

# Logging config for traceability
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(message)s')
@@ -386,4 +481,5 @@ class OmniAIvX:
    def encrypt_and_store(self, data: str):
        encrypted = self.security_privacy.encrypt(data)
        self.memory.store_long_term(encrypted)
-
+if __name__ == "__main__":
+    demo.launch(share=True)
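A minimal sketch of how the new helpers could be exercised outside the Gradio UI, assuming the updated app.py is on the import path and gradio, transformers, and torch are installed. Only generate_reply and generate_suggestions (with the signatures shown in the diff) come from the commit; the rest is illustrative.

# Hypothetical local smoke test, not part of the commit.
# Note: importing app loads the SmolLM2 model and builds the Blocks UI at import time,
# so the first run is slow; demo.launch() is only called under __main__.
from app import generate_reply, generate_suggestions

history = []
# generate_reply returns ("", updated_history) and appends the exchange to chatlog.json
_, history = generate_reply("Hello!", history, temperature=0.7, max_tokens=50)
print(history[-1][1])  # the model's reply

# generate_suggestions writes to changelog.json and returns a bulleted string
print(generate_suggestions("Responses are sometimes too verbose."))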