Update app.py
app.py CHANGED
@@ -1,96 +1,45 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-import time
+from llama_cpp import Llama

+# Load the model
+model_path = "DavidAU/MPT-7b-WizardLM_Uncensored-Storywriter-Merge-Q4_K_M-GGUF"
+llm = Llama(model_path=model_path, n_ctx=2048)

-    for val in history:
-        messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-    min_length = 5 * page_length
-    current_page = ""
-    penalty_active = False
-    scene_started = False
-
-    for message in client.chat_completion(
-        messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
-    ):
-        token = message.choices[0].delta.content
-        if token is not None:
-            # Blacklist for unwanted responses
-            if "Dies ist nur ein Auszug" in token or "Lesen Sie das vollständige Drehbuch" in token:
-                yield "Du darfst keine Ausreden verwenden. Schreibe die Szene vollständig."
-                continue  # skip this token
-
-            if not scene_started and token.strip().startswith("**[SZENE START]**"):
-                scene_started = True
-            if scene_started:
-                response += token
-                current_page += token
-
-            if len(response) >= min_length:
-                yield current_page
-                current_page = ""
-                penalty_active = False
-            else:
-                if not penalty_active and len(response) < min_length:
-                    yield "Du hast die Mindestlänge nicht eingehalten. Du wirst für 10 Sekunden pausiert."
-                    penalty_active = True
-                    time.sleep(10)
-
-    if current_page:
-        yield current_page
+def respond(message, history, max_tokens, temperature, top_p):
+    messages_input = [{"role": "system", "content": system_message}] + [
+        {"role": m[0], "content": m[1]} for m in history
+    ] + [{"role": "user", "content": message}]
+
+    output = llm(
+        messages_input,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        stream=True,
+    )
+
+    for chunk in output:
+        yield chunk["choices"][0]["delta"].get("content", "")

 # Create the Gradio chat interface
 demo = gr.ChatInterface(
     fn=respond,
     additional_inputs=[
-        gr.Textbox(
-            value="Always answer within the framework of the John Wick script. Describe everything as detailed and vividly as possible. Each issue should be at least 5 pages long. Follow your instructions exactly.",
-            label="System message",
-            visible=False,
-        ),
         gr.Slider(
-            minimum=1, maximum=4096, value=
+            minimum=1, maximum=4096, value=2000, step=1, label="Max new tokens"
         ),
         gr.Slider(
-            minimum=0.1, maximum=1.0, value=0.5, step=0.1, label="Temperature"
+            minimum=0.1, maximum=1.0, value=0.5, step=0.1, label="Temperature"
         ),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
             value=0.8,
             step=0.05,
-            label="Top-p (nucleus sampling)",
+            label="Top-p (nucleus sampling)",
         ),
     ],
 )

-# Custom function to display the response (as Markdown)
-def display_response(response):
-    return gr.Markdown(f"**[SZENE START]**\n\n{response}")
-
-with demo:
-    gr.Markdown("**[SZENE START]**")  # initial scene start
-    output = gr.Chatbot()
-    demo.output_component = output
-
 if __name__ == "__main__":
     demo.launch()
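As committed, the new respond() cannot run: system_message is referenced but no longer defined anywhere (the hidden "System message" Textbox was removed from additional_inputs without a replacement), history entries are (user, assistant) text pairs rather than (role, content) pairs, Llama.__call__ expects a prompt string rather than a list of chat messages, and model_path is a Hugging Face repo id while Llama(model_path=...) expects a path to a local GGUF file. Below is a minimal corrected sketch using llama-cpp-python's Llama.from_pretrained and create_chat_completion; the GGUF filename glob and the restored module-level system_message constant are assumptions, not part of this commit:

from llama_cpp import Llama

# Fetch the GGUF file from the Hub and load it.
# The filename glob is an assumption -- adjust it to the actual file in the repo.
llm = Llama.from_pretrained(
    repo_id="DavidAU/MPT-7b-WizardLM_Uncensored-Storywriter-Merge-Q4_K_M-GGUF",
    filename="*Q4_K_M.gguf",
    n_ctx=2048,
)

# Restored as a constant, since the System message Textbox was dropped from the UI.
system_message = (
    "Always answer within the framework of the John Wick script. "
    "Describe everything as detailed and vividly as possible."
)

def respond(message, history, max_tokens, temperature, top_p):
    # gr.ChatInterface passes history as (user, assistant) text pairs.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # create_chat_completion accepts a message list; Llama.__call__ only takes a prompt string.
    stream = llm.create_chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    )

    # ChatInterface replaces the displayed reply on every yield, so accumulate the deltas.
    partial = ""
    for chunk in stream:
        partial += chunk["choices"][0]["delta"].get("content", "")
        yield partial

The rest of the file can stay as-is: the three sliders in gr.ChatInterface still map onto max_tokens, temperature, and top_p in that order. Note that Llama.from_pretrained additionally requires the huggingface-hub package to download the model file.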