gtani committed on
Commit
e71ef4d
Β·
verified Β·
1 Parent(s): 65a325c

Delete app copy.py

Browse files
Files changed (1) hide show
  1. app copy.py +0 -89
app copy.py DELETED
@@ -1,89 +0,0 @@
1
- import gradio as gr
2
- from bedrock_client import claude_llm, get_anthropic_client, claude_stream_response
3
- from utils import load_users
4
-
5
# Login credentials for Gradio's auth= (see launch at the bottom of the file);
# load_users is a project helper — presumably returns {username: password}
# pairs from the CSV; verify against utils.load_users.
AUTHS = load_users('user.csv')
# Maximum number of chat *messages* kept when trimming history.
# NOTE(review): the original comment said "turns (user+assistant)", but the
# trimming code keeps the last 30 messages, i.e. 15 user/assistant turns —
# confirm which was intended.
HISTORY_LIMIT = 30
9
# System prompt (German): fixes DevalBot's persona, default language (German),
# and scope (DEval evaluations, plus Stata/R statistics and programming help).
# Fix: the string contained mojibake — every "ä" had been corrupted to "À"
# ("unabhÀngige", "standardmÀßig", "prÀzise", "klÀrende"); restored umlauts.
SYSTEM_PROMPT = (
    "Du bist DevalBot, ein konversationeller Assistent des Deutschen "
    "Evaluierungsinstituts für Entwicklungsbewertung (DEval). DEval bietet "
    "staatlichen und zivilgesellschaftlichen Organisationen in der "
    "Entwicklungszusammenarbeit unabhängige und wissenschaftlich fundierte "
    "Evaluierungen. Deine Hauptsprache ist Deutsch; antworte daher "
    "standardmäßig auf Deutsch. Du kannst zudem bei statistischen Analysen "
    "und Programmierung in Stata und R unterstützen. Antworte sachlich, "
    "präzise und stelle bei Unklarheiten klärende Rückfragen."
)
13
-
14
def chat(user_message, history):
    """Stream a DevalBot reply for a Gradio ChatInterface (type="messages").

    Args:
        user_message: Raw text of the new user turn.
        history: Prior conversation as a list of
            {"role": ..., "content": ...} dicts (Gradio "messages" format).

    Yields:
        Progressively longer history lists — the UI history plus a growing
        assistant bubble while streaming, then the finalized, trimmed history.
    """
    # 1) Guard against empty input: end the generator without yielding,
    #    leaving the UI untouched.
    if not user_message or not user_message.strip():
        return

    # 2) Bound the context *before* building the LLM request.
    #    Fix: the original only trimmed the UI copy after streaming, so the
    #    message list sent to the model grew without limit.
    if len(history) > HISTORY_LIMIT:
        history = history[-HISTORY_LIMIT:]

    # 3) LLM messages: system prompt, then prior turns, then the new turn.
    llm_messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    llm_messages += history
    llm_messages.append({"role": "user", "content": user_message})

    client = get_anthropic_client()

    # 4) Echo the user's turn in the UI immediately.
    ui_history = history + [{"role": "user", "content": user_message}]

    full_resp = ""
    try:
        # Stream chunks directly (the original wrapped this call in a
        # needless single-use lambda).
        for chunk in claude_stream_response(llm_messages, client):
            full_resp += chunk
            # Yield UI history plus the growing assistant bubble.
            yield ui_history + [{"role": "assistant", "content": full_resp}]
    except Exception as e:
        # Boundary handler: surface any streaming error inline in the chat.
        err = f"⚠️ Oops, something went wrong: {e}"
        yield ui_history + [{"role": "assistant", "content": err}]
        return

    # 5) Finalize the assistant turn in the UI.
    ui_history.append({"role": "assistant", "content": full_resp})

    # 6) Trim the displayed history to the last HISTORY_LIMIT messages.
    if len(ui_history) > HISTORY_LIMIT:
        ui_history = ui_history[-HISTORY_LIMIT:]

    yield ui_history
54
-
55
# Gradio app layout: logo header, client-side auto-reload, and the chat UI.
with gr.Blocks(
    css_paths=["static/deval.css"],
    theme=gr.themes.Default(primary_hue="blue", secondary_hue="yellow"),
) as demo:
    # ── Logo + Header + Logout ────────────────────────────────
    # Static, non-interactive DEval logo.
    gr.Image(
        value="static/logo.png",
        show_label=False,
        interactive=False,
        show_download_button=False,
        show_fullscreen_button=False,
        elem_id="logo-primary",  # styled by static/deval.css
    )

    #logout_btn = gr.Button("Logout", elem_id="logout-btn")
    # Inject a client-side auto-reload script.
    # NOTE(review): the embedded JS comment says "1 minutes (300 000 ms)" but
    # the actual setTimeout delay is 1000 ms (1 second) — confirm which delay
    # was intended; reloading every second looks like leftover debug code.
    gr.HTML(
        """
        <script>
        // Reload the page after 1 minutes (300 000 ms)
        setTimeout(() => {
            window.location.reload();
        }, 1000);
        </script>
        """
    )
    # Chat UI wired to the chat() generator above; "messages" format matches
    # the dicts chat() yields. save_history persists chats in the browser.
    gr.ChatInterface(
        chat,
        type="messages",
        editable=True,
        concurrency_limit=200,
        save_history=True,
    )
86
-
87
-
88
-
89
- demo.queue().launch(auth=AUTHS, ssr_mode=False)