Raiff1982 committed
Commit e8c0168 · verified · 1 Parent(s): df0f8ea

Update app.py

Files changed (1)
  1. app.py +24 -30
app.py CHANGED
@@ -7,7 +7,6 @@ from codette_trust import trust_calibration, weighted_consensus
 
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
-# Initialize Codette Local Core
 codette_cqure = Code7eCQURE(
     perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
     ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",
@@ -25,17 +24,22 @@ agents = [
     MisinfoAgent("MisinfoAI", "Chaos", 0.1)
 ]
 
-def ask_codette(prompt, consent, dynamic_rec, use_finetune):
+def codette_chat_interface(message, history, consent=True, dynamic_rec=True, use_finetune=False):
     if not consent:
         return "User consent required."
 
+    full_prompt = "\n".join([f"User: {user}\nCodette: {bot}" for user, bot in history if user and bot])
+    full_prompt += f"\nUser: {message}"
+
     if use_finetune:
         try:
             response = openai.ChatCompletion.create(
                 model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
                 messages=[
                     {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
-                    {"role": "user", "content": prompt}
+                    *[{"role": "user", "content": user} if i % 2 == 0 else {"role": "assistant", "content": bot}
+                      for i, (user, bot) in enumerate(history)],
+                    {"role": "user", "content": message}
                 ],
                 temperature=0.7
             )
@@ -43,7 +47,7 @@ def ask_codette(prompt, consent, dynamic_rec, use_finetune):
         except Exception as e:
             return f"Error from API: {str(e)}"
     else:
-        proposals = [agent.propose(prompt) for agent in agents]
+        proposals = [agent.propose(message) for agent in agents]
         outcome = codette_cqure.recursive_universal_reasoning(
             " | ".join(proposals),
             user_consent=consent,
@@ -51,34 +55,24 @@ def ask_codette(prompt, consent, dynamic_rec, use_finetune):
         )
         return f"Ethical Outcome (Local): {outcome}"
 
-description_text = """Codette is a sovereign modular AI.
-
-This demo lets you choose:
-- Local reasoning core (Code7eCQURE)
-- Fine-tuned GPT-4.1 model: Codette v5 @ step 220
-
-She draws from Newtonian logic, Da Vinci creativity,
-ethical frameworks, emotion, and memory cocooning.
+with gr.Blocks(title="Codette Chat Hybrid") as demo:
+    gr.Markdown("""
+    # Codette: Hybrid AI Chat (v5 FT @ Step 220)
+    A sovereign AI capable of emotional, ethical, and reflective reasoning.
+    Choose your engine and engage her in ongoing dialogue.
+    """)
 
-This demo lets you choose:
-- 🧠 Local reasoning core (Code7eCQURE)
-- ☁️ Fine-tuned GPT-4.1 model: Codette v5 @ step 220
+    with gr.Row():
+        consent = gr.Checkbox(label="User Consent", value=True)
+        dynamic_rec = gr.Checkbox(label="Enable Dynamic Recursion", value=True)
+        use_finetune = gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False)
 
-She draws from Newtonian logic, Da Vinci creativity, ethical frameworks, emotion, and memory cocooning.
-"""
-
-demo = gr.Interface(
-    fn=ask_codette,
-    inputs=[
-        gr.Textbox(label="Ask Codette a Scenario"),
-        gr.Checkbox(label="User Consent", value=True),
-        gr.Checkbox(label="Enable Dynamic Recursion", value=True),
-        gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False)
-    ],
-    outputs=gr.Textbox(label="Codette's Response", lines=12),
-    title="Codette Hybrid AI (v5 FT @ Step 220)",
-    description=description_text
-)
+    chatbot = gr.ChatInterface(
+        fn=lambda msg, history: codette_chat_interface(msg, history, consent.value, dynamic_rec.value, use_finetune.value),
+        title="Codette Conversation",
+        textbox=gr.Textbox(placeholder="Ask Codette something...", label="Your Message"),
+        chatbot=gr.Chatbot(label="Codette's Response")
+    )
 
 if __name__ == "__main__":
     demo.launch()
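
Note on the history handling added in this commit: the starred comprehension in the fine-tuned branch keeps only one side of each `(user, bot)` pair (the user half on even indices, the bot half on odd ones), so roughly half of the prior conversation is dropped and roles can fall out of sequence; `full_prompt` is also built but, as far as these hunks show, never used. A minimal sketch of a flattener that replays both sides of every completed turn is below. `history_to_messages` is a hypothetical helper, not part of the committed app.py, and it assumes Gradio's tuple-format history (a list of `(user_message, bot_reply)` pairs).

# Hypothetical helper (not in the commit): flatten Gradio tuple-format chat
# history into OpenAI chat messages, keeping BOTH sides of every turn.
def history_to_messages(history, system_prompt):
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    return messages

# Inside the fine-tuned branch, the call could then pass:
#   messages=history_to_messages(history, SYSTEM_PROMPT) + [{"role": "user", "content": message}]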
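
Separately, the lambda passed to `gr.ChatInterface` reads `consent.value`, `dynamic_rec.value`, and `use_finetune.value`, which are the checkboxes' initial values captured when the UI is built, not whatever the user has toggled by the time a message is sent. The usual Gradio pattern is to pass the components through `additional_inputs`, so their current values reach the callback with each message. A minimal sketch, with a stand-in callback body for illustration:

# Sketch only: wire the checkboxes through additional_inputs so the callback
# receives their CURRENT values per message, instead of a startup snapshot.
import gradio as gr

def codette_chat_interface(message, history, consent=True, dynamic_rec=True, use_finetune=False):
    # Stand-in body; the committed implementation lives in app.py.
    if not consent:
        return "User consent required."
    return f"(echo) {message}"

demo = gr.ChatInterface(
    fn=codette_chat_interface,  # called as fn(message, history, *additional_inputs)
    additional_inputs=[
        gr.Checkbox(label="User Consent", value=True),
        gr.Checkbox(label="Enable Dynamic Recursion", value=True),
        gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False),
    ],
    title="Codette Chat Hybrid",
)

if __name__ == "__main__":
    demo.launch()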
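
Finally, `openai.ChatCompletion.create` is the legacy interface from openai<1.0 and was removed in the 1.x SDK. Keeping the legacy call is fine if the Space pins an older SDK; for reference, a rough equivalent under the 1.x client would look like the sketch below (model string copied from the diff; `ask_finetuned` is a hypothetical helper, and the error handling mirrors the committed except-clause).

# Hypothetical equivalent for openai>=1.0 (not the committed code).
import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

def ask_finetuned(messages):
    try:
        completion = client.chat.completions.create(
            model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
            messages=messages,
            temperature=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error from API: {str(e)}"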