Tijmen2 committed on
Commit
96edac1
·
verified ·
1 Parent(s): ef6cbea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -134
app.py CHANGED
@@ -3,40 +3,6 @@ from llama_cpp import Llama
3
  from huggingface_hub import hf_hub_download
4
  import random
5
 
6
- # Custom CSS for better styling
7
- custom_css = """
8
- .gradio-container {
9
- background: linear-gradient(to bottom, #1a1a2e, #16213e) !important;
10
- }
11
- .header-text {
12
- text-align: center;
13
- color: #e2e8f0;
14
- font-size: 2.5em;
15
- font-weight: bold;
16
- margin: 1em 0;
17
- text-shadow: 0 0 10px rgba(255, 255, 255, 0.3);
18
- }
19
- .subheader {
20
- text-align: center;
21
- color: #94a3b8;
22
- font-size: 1.2em;
23
- margin-bottom: 2em;
24
- }
25
- .controls-section {
26
- background: rgba(255, 255, 255, 0.05);
27
- padding: 1.5em;
28
- border-radius: 10px;
29
- margin: 1em 0;
30
- }
31
- .model-info {
32
- background: rgba(0, 0, 0, 0.2);
33
- padding: 1em;
34
- border-radius: 8px;
35
- margin-top: 1em;
36
- color: #94a3b8;
37
- }
38
- """
39
-
40
  # Initialize model
41
  model_path = hf_hub_download(
42
  repo_id="AstroMLab/AstroSage-8B-GGUF",
@@ -75,43 +41,25 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
75
  messages.append({"role": "assistant", "content": assistant_msg})
76
  messages.append({"role": "user", "content": message})
77
 
78
- response = llm.create_chat_completion(
79
- messages=messages,
80
- max_tokens=max_tokens,
81
- temperature=temperature,
82
- top_p=top_p
83
- )
84
-
85
- return response["choices"][0]["message"]["content"]
86
-
87
- def regenerate(message, history, system_message, max_tokens, temperature, top_p):
88
- # Remove the last assistant message from history
89
- if history and len(history) > 0:
90
- history = history[:-1]
91
-
92
- # Generate a new response
93
- return respond(message, history, system_message, max_tokens, temperature, top_p)
94
 
95
  def clear_context():
96
  return [], get_random_greeting()
97
 
98
- with gr.Blocks(css=custom_css) as demo:
99
- gr.HTML(
100
- """
101
- <div class="header-text">🌌 AstroSage-LLAMA-3.1-8B</div>
102
- <div class="subheader">Your AI Guide to the Cosmos</div>
103
- """
104
- )
105
 
106
- chatbot = gr.Chatbot(
107
- value=[[None, get_random_greeting()]],
108
- height=400,
109
- show_label=False,
110
- )
111
- msg = gr.Textbox(
112
- placeholder="Ask about astronomy, astrophysics, or cosmology...",
113
- show_label=False,
114
- )
115
 
116
  with gr.Accordion("Advanced Settings", open=False) as advanced_settings:
117
  system_msg = gr.Textbox(
@@ -119,82 +67,26 @@ with gr.Blocks(css=custom_css) as demo:
119
  label="System Message",
120
  lines=3
121
  )
122
- with gr.Row():
123
- max_tokens = gr.Slider(
124
- minimum=1,
125
- maximum=2048,
126
- value=512,
127
- step=1,
128
- label="Max Tokens"
129
- )
130
- temperature = gr.Slider(
131
- minimum=0.1,
132
- maximum=4.0,
133
- value=0.7,
134
- step=0.1,
135
- label="Temperature"
136
- )
137
- top_p = gr.Slider(
138
- minimum=0.1,
139
- maximum=1.0,
140
- value=0.9,
141
- step=0.05,
142
- label="Top-p"
143
- )
144
 
145
  with gr.Row():
146
  clear = gr.Button("🌟 New Chat")
147
- regenerate_btn = gr.Button("πŸ”„ Regenerate")
148
  submit = gr.Button("Send πŸš€", variant="primary")
149
 
150
- gr.HTML(
151
- """
152
- <div class="model-info">
153
- <p>πŸ“š Model: AstroSage-LLAMA-3.1-8B (8-bit Quantized)</p>
154
- <p>πŸ”§ Built with llama.cpp, Gradio, and Python</p>
155
- <p>πŸ’« Specialized in astronomy, astrophysics, and cosmology</p>
156
- </div>
157
- """
158
- )
159
-
160
- # Set up event handlers
161
- msg.submit(
162
- respond,
163
- [msg, chatbot, system_msg, max_tokens, temperature, top_p],
164
- [chatbot],
165
- queue=False
166
- ).then(
167
- lambda: "",
168
- None,
169
- [msg],
170
- queue=False
171
- )
172
-
173
  submit.click(
174
- respond,
175
  [msg, chatbot, system_msg, max_tokens, temperature, top_p],
176
- [chatbot],
177
- queue=False
178
- ).then(
179
- lambda: "",
180
- None,
181
- [msg],
182
- queue=False
183
  )
184
 
185
- regenerate_btn.click(
186
- regenerate,
187
- [msg, chatbot, system_msg, max_tokens, temperature, top_p],
188
- [chatbot],
189
- queue=False
190
- )
191
-
192
- clear.click(
193
- clear_context,
194
- None,
195
- [chatbot, msg],
196
- queue=False
197
- )
198
 
199
  if __name__ == "__main__":
200
- demo.launch()
 
3
  from huggingface_hub import hf_hub_download
4
  import random
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  # Initialize model
7
  model_path = hf_hub_download(
8
  repo_id="AstroMLab/AstroSage-8B-GGUF",
 
41
  messages.append({"role": "assistant", "content": assistant_msg})
42
  messages.append({"role": "user", "content": message})
43
 
44
+ try:
45
+ response = llm.create_chat_completion(
46
+ messages=messages,
47
+ max_tokens=max_tokens,
48
+ temperature=temperature,
49
+ top_p=top_p
50
+ )
51
+ return response["choices"][0]["message"]["content"]
52
+ except Exception as e:
53
+ return f"Error: {e}"
 
 
 
 
 
 
54
 
55
  def clear_context():
56
  return [], get_random_greeting()
57
 
58
+ with gr.Blocks() as demo:
59
+ gr.HTML("<div class='header-text'>🌌 AstroSage-LLAMA-3.1-8B</div><div class='subheader'>Your AI Guide to the Cosmos</div>")
 
 
 
 
 
60
 
61
+ chatbot = gr.Chatbot(height=400)
62
+ msg = gr.Textbox(placeholder="Ask about astronomy, astrophysics, or cosmology...")
 
 
 
 
 
 
 
63
 
64
  with gr.Accordion("Advanced Settings", open=False) as advanced_settings:
65
  system_msg = gr.Textbox(
 
67
  label="System Message",
68
  lines=3
69
  )
70
+ max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Max Tokens")
71
+ temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
72
+ top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
  with gr.Row():
75
  clear = gr.Button("🌟 New Chat")
 
76
  submit = gr.Button("Send πŸš€", variant="primary")
77
 
78
+ def handle_submit(message, history, system_message, max_tokens, temperature, top_p):
79
+ response = respond(message, history, system_message, max_tokens, temperature, top_p)
80
+ history.append((message, response))
81
+ return history, ""
82
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  submit.click(
84
+ handle_submit,
85
  [msg, chatbot, system_msg, max_tokens, temperature, top_p],
86
+ [chatbot, msg]
 
 
 
 
 
 
87
  )
88
 
89
+ clear.click(clear_context, [], [chatbot, msg])
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
  if __name__ == "__main__":
92
+ demo.launch()