Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -226,7 +226,7 @@ def handle_idle_state():
|
|
226 |
|
227 |
# S.O.U.L. (Self-Organizing Universal Learning) class
|
228 |
class SOUL:
|
229 |
-
def __init__(self, model_name='EleutherAI/gpt-neox-
|
230 |
self.tokenizer = GPTNeoXTokenizerFast.from_pretrained(model_name)
|
231 |
self.model = GPTNeoXForCausalLM.from_pretrained(model_name)
|
232 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
@@ -266,6 +266,7 @@ def interact_with_soul(user_input):
|
|
266 |
neox_response, emotional_response = soul.bridge_ai(user_input)
|
267 |
return neox_response, emotional_response
|
268 |
|
|
|
269 |
iface = gr.Interface(
|
270 |
fn=interact_with_soul,
|
271 |
inputs="text",
|
@@ -274,4 +275,5 @@ iface = gr.Interface(
|
|
274 |
description="Enter a prompt to interact with the S.O.U.L AI, which will generate a response and provide an emotional analysis."
|
275 |
)
|
276 |
|
|
|
277 |
iface.launch()
|
|
|
226 |
|
227 |
# S.O.U.L. (Self-Organizing Universal Learning) class
|
228 |
class SOUL:
|
229 |
+ def __init__(self, model_name='EleutherAI/gpt-neox-2.7B'):
|
230 |
self.tokenizer = GPTNeoXTokenizerFast.from_pretrained(model_name)
|
231 |
self.model = GPTNeoXForCausalLM.from_pretrained(model_name)
|
232 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
|
266 |
neox_response, emotional_response = soul.bridge_ai(user_input)
|
267 |
return neox_response, emotional_response
|
268 |
|
269 |
+ # Gradio interface setup
|
270 |
iface = gr.Interface(
|
271 |
fn=interact_with_soul,
|
272 |
inputs="text",
|
|
|
275 |
description="Enter a prompt to interact with the S.O.U.L AI, which will generate a response and provide an emotional analysis."
|
276 |
)
|
277 |
|
278 |
+ # Launch the interface
|
279 |
iface.launch()
|