ruggsea committed
Commit 0daaa74 · 1 Parent(s): 3ddfad8

fixed structure

Files changed (1)
  1. app.py +66 -66
app.py CHANGED
@@ -12,8 +12,6 @@ DEFAULT_MAX_NEW_TOKENS = 4000
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Llama-3.1 8B Stanford Encyclopedia of Philosophy Chat
-
 This Space showcases the Llama3.1-Instruct-SEP-Chat model from ruggsea, a fine-tuned instruction version of Meta's Llama 3.1 8B model, specifically tailored for philosophical discussions with a formal and informative tone. The model was trained using the Stanford Encyclopedia of Philosophy dataset and carefully crafted prompts.
 
 Feel free to engage in philosophical discussions and ask questions. The model supports multi-turn conversations and will maintain context.
@@ -99,69 +97,6 @@ def generate(
         gr.Warning(f"Error during generation: {str(e)}")
         yield "I apologize, but I encountered an error. Please try again."
 
-chat_interface = gr.ChatInterface(
-    fn=generate,
-    additional_inputs=[
-        gr.Textbox(
-            label="System prompt",
-            lines=6,
-            value="You are a knowledgeable philosophy professor using the Stanford Encyclopedia of Philosophy as your knowledge base. Provide clear, accurate responses using markdown formatting. Focus on philosophical concepts and maintain academic rigor while being accessible. Always cite relevant philosophers and concepts."
-        ),
-        gr.Slider(
-            label="Max new tokens",
-            minimum=1,
-            maximum=MAX_MAX_NEW_TOKENS,
-            step=1,
-            value=DEFAULT_MAX_NEW_TOKENS,
-        ),
-        gr.Slider(
-            label="Temperature",
-            minimum=0.1,
-            maximum=4.0,
-            step=0.1,
-            value=0.7,
-        ),
-        gr.Slider(
-            label="Top-p (nucleus sampling)",
-            minimum=0.05,
-            maximum=1.0,
-            step=0.05,
-            value=0.9,
-        ),
-        gr.Slider(
-            label="Top-k",
-            minimum=1,
-            maximum=1000,
-            step=1,
-            value=50,
-        ),
-        gr.Slider(
-            label="Repetition penalty",
-            minimum=1.0,
-            maximum=2.0,
-            step=0.05,
-            value=1.1,
-        ),
-    ],
-    stop_btn=None,
-    examples=[
-        ["What is the trolley problem and what are its main ethical implications?"],
-        ["Can you explain Plato's Theory of Forms?"],
-        ["What is the difference between analytic and continental philosophy?"],
-        ["How does Kant's Categorical Imperative work?"],
-        ["What is the problem of consciousness in philosophy of mind?"],
-    ],
-    title="Philosophy Chat with Llama 3.1",
-    chatbot=gr.Chatbot(
-        show_label=False,
-        avatar_images=(None, None),
-    ),
-    autofocus=True,
-    retry_btn=None,
-    undo_btn=None,
-    clear_btn=None,
-)
-
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown("# Philosophy Chat with Llama 3.1")
     gr.Markdown(DESCRIPTION)
@@ -169,7 +104,72 @@ with gr.Blocks(css="style.css") as demo:
         value="Duplicate Space for private use",
         elem_id="duplicate-button"
     )
-    chat_interface.render()
+
+    # Add some spacing
+    gr.Markdown("<br>")
+
+    chat_interface = gr.ChatInterface(
+        fn=generate,
+        additional_inputs=[
+            gr.Textbox(
+                label="System prompt",
+                lines=6,
+                value="You are a knowledgeable philosophy professor using the Stanford Encyclopedia of Philosophy as your knowledge base. Provide clear, accurate responses using markdown formatting. Focus on philosophical concepts and maintain academic rigor while being accessible. Always cite relevant philosophers and concepts."
+            ),
+            gr.Slider(
+                label="Max new tokens",
+                minimum=1,
+                maximum=MAX_MAX_NEW_TOKENS,
+                step=1,
+                value=DEFAULT_MAX_NEW_TOKENS,
+            ),
+            gr.Slider(
+                label="Temperature",
+                minimum=0.1,
+                maximum=4.0,
+                step=0.1,
+                value=0.7,
+            ),
+            gr.Slider(
+                label="Top-p (nucleus sampling)",
+                minimum=0.05,
+                maximum=1.0,
+                step=0.05,
+                value=0.9,
+            ),
+            gr.Slider(
+                label="Top-k",
+                minimum=1,
+                maximum=1000,
+                step=1,
+                value=50,
+            ),
+            gr.Slider(
+                label="Repetition penalty",
+                minimum=1.0,
+                maximum=2.0,
+                step=0.05,
+                value=1.1,
+            ),
+        ],
+        stop_btn=None,
+        examples=[
+            ["What is the trolley problem and what are its main ethical implications?"],
+            ["Can you explain Plato's Theory of Forms?"],
+            ["What is the difference between analytic and continental philosophy?"],
+            ["How does Kant's Categorical Imperative work?"],
+            ["What is the problem of consciousness in philosophy of mind?"],
+        ],
+        chatbot=gr.Chatbot(
+            show_label=False,
+            avatar_images=(None, None),
+        ),
+        autofocus=True,
+        retry_btn=None,
+        undo_btn=None,
+        clear_btn=None,
+    )
+
     gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
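
The structural change above is: the gr.ChatInterface, previously built at module level and attached inside the layout via chat_interface.render(), is now constructed directly inside the with gr.Blocks(...) context, so it renders in place between the duplicate button and the license text. The snippet below is a minimal sketch of that pattern in isolation, assuming a Gradio 4.x API like the one app.py uses (ChatInterface taking additional_inputs and a streaming generator fn); the echo() function and the single slider are placeholders for illustration, not code from this commit.

import gradio as gr


def echo(message, history, system_prompt, max_new_tokens):
    # Placeholder streaming generator standing in for app.py's generate();
    # ChatInterface passes (message, history, *additional_inputs) to fn.
    yield f"[system prompt set, max_new_tokens={max_new_tokens}] You said: {message}"


with gr.Blocks() as demo:
    gr.Markdown("# Philosophy Chat with Llama 3.1")
    # Created inside the Blocks context, the ChatInterface renders here directly;
    # no separate chat_interface.render() call is needed.
    chat_interface = gr.ChatInterface(
        fn=echo,
        additional_inputs=[
            gr.Textbox(label="System prompt", lines=2, value="You are a helpful assistant."),
            gr.Slider(label="Max new tokens", minimum=1, maximum=2048, step=1, value=256),
        ],
    )

if __name__ == "__main__":
    demo.launch()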