Update app.py
app.py CHANGED
@@ -43,18 +43,25 @@ async def uptime(ctx):
 
 @bot.command()
 async def ai(ctx, *, input_text: str):
-    """Ask our AI model a question. (Session resets every 1 message!)"""
+    """Ask our AI model a question. (Session resets every 1 message!)"""
 
     try:
         client = grc.Client("https://wop-xxx-opengpt.hf.space/")
-
-
-
-
-
-
-
-
+
+        # Asynchronously make the prediction request
+        prediction_task = asyncio.create_task(client.predict(
+            input_text,
+            0.9,
+            1800,
+            0.9,
+            1.2,
+            api_name="/chat"
+        ))
+
+        # Wait for the prediction task to complete
+        result = await prediction_task
+
+        # Truncate the last 4 characters from the result (if necessary)
         result = result[:-4]
 
         # Create an embed with the AI's response
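
The change offloads the Gradio call so the Discord event loop is not blocked while the model generates a reply. Below is a minimal, self-contained sketch of that pattern, not the Space's actual code: since gradio_client's Client.predict is a blocking call rather than a coroutine, the sketch awaits it through asyncio.to_thread, and the Space URL, the meaning of the positional sampling parameters, and the ask_ai helper name are placeholders mirroring the diff.

import asyncio
import gradio_client as grc

async def ask_ai(input_text: str) -> str:
    # Hypothetical helper mirroring the diff; the Space URL is a placeholder.
    client = grc.Client("https://wop-xxx-opengpt.hf.space/")

    # Client.predict blocks, so run it in a worker thread and await the result.
    result = await asyncio.to_thread(
        client.predict,
        input_text,  # prompt
        0.9,         # assumed: temperature
        1800,        # assumed: max new tokens
        0.9,         # assumed: top_p
        1.2,         # assumed: repetition penalty
        api_name="/chat",
    )

    # Mirror the diff's post-processing: drop the trailing 4 characters.
    return result[:-4]

Awaiting the threaded call keeps the bot responsive to other commands while the Space is still generating.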