Update app.py
app.py CHANGED
@@ -1,10 +1,10 @@
 #QuantumNova
 
-import
+import gradio as gr
 import random
 import json
 from huggingface_hub import InferenceClient
-import
+import requests
 
 
 class QuasiKI:
@@ -33,27 +33,23 @@ class QuasiKI:
             print(f"Fehler beim Abrufen von Quanten-Zufallszahlen: {e}")
             self.quantum_randomness = [random.randint(0, 255) for _ in range(10)]
 
-    def
-        user_input = user_input.strip()
-        if not user_input:
-            return "Bitte gib eine gültige Eingabe ein."
-
-        if user_input.lower().startswith("suche nach"):
-            query = user_input[10:].strip()
-            if not query:
-                return "Bitte gib etwas ein, wonach ich suchen soll."
-            print(f"Suche nach: {query}")
-            return "Die Websuche ist vorübergehend deaktiviert."
-
-        return self.generate_response(user_input)
-
-    def generate_response(self, input_text):
+    def generate_response(self, input_text, max_tokens, temperature, top_p):
         if not self.client:
             return "Das Modell ist derzeit nicht verfügbar."
 
         try:
-            response = self.client.chat_completion([{"role": "user", "content": input_text}]
-
+            response = self.client.chat_completion([{"role": "user", "content": input_text}],
+                                                   max_tokens=max_tokens,
+                                                   temperature=temperature,
+                                                   top_p=top_p,
+                                                   stream=True)
+
+            response_text = ""
+            for message in response:
+                token = message.choices[0].delta.content
+                response_text += token
+            return response_text.strip()
+
         except Exception as e:
             return f"Fehler beim Generieren der Antwort: {e}"
 
@@ -98,23 +94,49 @@ class QuasiKI:
             print("Kein gespeichertes Gedächtnis gefunden.")
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Gradio-Interface-Funktion
+def respond(
+    message,
+    history: list,
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    ai = QuasiKI(max_feedback=3) # QuasiKI mit Feedback limit initialisieren
+    ai.fetch_quantum_randomness() # Quanten-Zufallszahlen abrufen
+    ai.load_memory() # Gedächtnis laden
+
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response_text = ai.generate_response(message, max_tokens, temperature, top_p)
+    return response_text
+
+
+# Gradio Demo starten
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="Du bist ein freundlicher Chatbot.", label="Systemnachricht"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Maximale neue Token"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperatur"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (Nucleus Sampling)",
+        ),
+    ],
+)
 
-
-
-        ai.learn(feedback_score)
-    except Exception as e:
-        print(f"Fehler beim Sammeln von Feedback: {e}")
+if __name__ == "__main__":
+    demo.launch()
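For reference, a minimal, self-contained sketch of the streaming pattern the updated generate_response relies on. It is not part of the commit: the model id is an assumption chosen for illustration, and the sketch additionally skips chunks whose choices[0].delta.content is None, which the hub client can emit for role-only deltas.

# Hedged sketch: consume InferenceClient.chat_completion(..., stream=True).
# The model id below is an assumption, not taken from this Space.
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # assumed model id

def stream_reply(prompt, max_tokens=512, temperature=0.7, top_p=0.95):
    response = client.chat_completion(
        [{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    )
    text = ""
    for chunk in response:
        token = chunk.choices[0].delta.content
        if token:  # delta.content can be None; skip empty deltas
            text += token
    return text.strip()

if __name__ == "__main__":
    print(stream_reply("Hallo!"))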
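A design note on the new respond: it assembles a messages list (system prompt plus prior turns) but then calls ai.generate_response(message, ...) with only the latest user message, so the system message and history do not reach the model. If that is unintended, one possible variant is sketched below; it assumes the QuasiKI class from app.py and would require generate_response to accept a ready-made message list instead of a single string.

# Hedged sketch, not part of the commit: forward the assembled messages list.
# Assumes QuasiKI from app.py and a generate_response that accepts a list of
# chat messages rather than a single string.
def respond(message, history: list, system_message, max_tokens, temperature, top_p):
    ai = QuasiKI(max_feedback=3)
    ai.fetch_quantum_randomness()
    ai.load_memory()

    messages = [{"role": "system", "content": system_message}]
    for user_turn, bot_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if bot_turn:
            messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": message})

    # Hand the whole conversation to the model instead of only the last turn.
    return ai.generate_response(messages, max_tokens, temperature, top_p)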