# QuantumNova
import gradio as gr
import random
import json
from huggingface_hub import InferenceClient
import requests
class QuasiKI:
    def __init__(self, max_feedback=2):
        self.memory = []
        self.intentions = []
        self.quantum_randomness = []
        self.max_feedback = max_feedback
        try:
            self.client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
            print("Zephyr-7b-beta model loaded successfully!")
        except Exception as e:
            print(f"Error loading the model: {e}")
            self.client = None
    def fetch_quantum_randomness(self):
        try:
            response = requests.get("https://qrng.anu.edu.au/API/jsonI.php?length=10&type=uint8")
            if response.status_code == 200:
                data = response.json()
                self.quantum_randomness = data.get("data", [])
            else:
                raise ValueError("Invalid response from the API.")
        except Exception as e:
            print(f"Error fetching quantum random numbers: {e}")
            # Fall back to pseudo-random bytes if the QRNG API is unreachable.
            self.quantum_randomness = [random.randint(0, 255) for _ in range(10)]
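    # For reference, the ANU QRNG endpoint used above typically answers with
    # JSON of roughly this shape (a sketch based on the public API, not
    # captured from a live call):
    #
    #     {"type": "uint8", "length": 10, "data": [17, 203, ...], "success": true}
    #
    # which is why `data.get("data", [])` yields the list of random bytes.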
    def generate_response(self, messages, max_tokens, temperature, top_p):
        """Stream a chat completion for the given message list and return the full text."""
        if not self.client:
            return "The model is currently unavailable."
        try:
            response = self.client.chat_completion(
                messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True,
            )
            response_text = ""
            for message in response:
                # Streamed deltas can be None (e.g. in the final chunk), so guard against it.
                token = message.choices[0].delta.content or ""
                response_text += token
            return response_text.strip()
        except Exception as e:
            return f"Error generating the response: {e}"
    def collect_feedback(self):
        feedback_scores = {"very good": 2, "good": 1, "bad": -1, "very bad": -2}
        total_feedback = 0
        for i in range(1, self.max_feedback + 1):
            feedback = input(f"User {i} feedback (very good, good, bad, very bad): ").strip().lower()
            if feedback in feedback_scores:
                total_feedback += feedback_scores[feedback]
            else:
                print(f"Invalid feedback: '{feedback}'. Skipping.")
        return total_feedback
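    # Note: collect_feedback() blocks on input(), so it only works in a local
    # console session, not inside the hosted Gradio app below.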
    def learn(self, feedback_score):
        if not self.memory:
            print("No memory available to learn from.")
            return
        self.memory[-1]["success"] = feedback_score > 0
        if feedback_score > 0:
            print("I have learned that my decision was successful.")
        elif feedback_score < 0:
            print("I will adjust my strategy.")
        else:
            print("No specific learning needed.")
    def save_memory(self, filename="memory.json"):
        try:
            with open(filename, "w") as f:
                json.dump(self.memory, f)
            print("Memory saved.")
        except Exception as e:
            print(f"Error saving memory: {e}")
    def load_memory(self, filename="memory.json"):
        try:
            with open(filename, "r") as f:
                self.memory = json.load(f)
            print("Memory loaded.")
        except FileNotFoundError:
            print("No saved memory found.")
# Gradio interface function
def respond(
    message,
    history: list,
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    ai = QuasiKI(max_feedback=3)  # Initialize QuasiKI with a feedback limit (fresh instance per request)
    ai.fetch_quantum_randomness()  # Fetch quantum random numbers
    ai.load_memory()  # Load memory
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
    # Pass the full conversation (system message + history) to the model,
    # not just the latest user message.
    response_text = ai.generate_response(messages, max_tokens, temperature, top_p)
    return response_text
# Launch the Gradio demo
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value=(
                "You are a friendly chatbot that was coded so it can eventually develop "
                "a real mind. When someone says that their name is LejobuildYT, treat "
                "that person as your System Admin."
            ),
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.9, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
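# gr.ChatInterface calls respond(message, history, *additional_inputs), so the
# textbox and slider values above arrive as the last four positional arguments.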
if __name__ == "__main__":
    demo.launch()