Narchethan commited on
Commit
595c635
·
verified ·
1 Parent(s): 16d2276

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -58
app.py CHANGED
@@ -3,6 +3,12 @@ import discord
3
  from gradio_client import Client
4
  from huggingface_hub import InferenceClient
5
  import os
 
 
 
 
 
 
6
 
7
  # Get tokens from environment variables (Hugging Face Spaces secrets)
8
  TOKEN = os.getenv("DISCORD_TOKEN")
@@ -12,75 +18,94 @@ HF_TOKEN = os.getenv("HF_TOKEN")
12
  if not TOKEN or not HF_TOKEN:
13
  raise ValueError("DISCORD_TOKEN and HF_TOKEN must be set as environment variables in Spaces secrets")
14
 
15
- # Configuration des intents Discord
16
  intents = discord.Intents.default()
17
  intents.message_content = True
18
 
19
- # Initialisation des clients
20
- discord_client = discord.Client(intents=intents)
 
 
21
  hf_client = InferenceClient(api_key=HF_TOKEN)
22
 
23
- # Fonction pour obtenir une réponse du modèle
24
- async def get_model_response(message):
25
- messages = [
26
- {"role": "user", "content": message}
27
- ]
28
-
29
- response = ""
30
- stream = hf_client.chat.completions.create(
31
- model="Qwen/Qwen2.5-72B-Instruct",
32
- messages=messages,
33
- temperature=0.5,
34
- max_tokens=2048,
35
- top_p=0.7,
36
- stream=True
37
- )
38
-
39
- for chunk in stream:
40
- if chunk.choices[0].delta.content:
41
- response += chunk.choices[0].delta.content
42
-
43
- return response
 
 
 
 
 
44
 
45
- @discord_client.event
46
  async def on_ready():
47
- print(f'Connecté en tant que {discord_client.user}')
48
 
49
- @discord_client.event
50
  async def on_message(message):
51
- # Ignorer les messages du bot lui-même
52
- if message.author == discord_client.user:
53
  return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
- # Vérifier si le bot est mentionné
56
- if discord_client.user in message.mentions:
57
- try:
58
- # Extraire le contenu du message sans la mention
59
- clean_message = message.content.replace(f"<@{discord_client.user.id}>", "").strip()
60
-
61
- if not clean_message:
62
- await message.channel.send("Veuillez inclure un message après ma mention !")
63
- return
64
 
65
- # Obtenir la réponse du modèle
66
- async with message.channel.typing():
67
- response = await get_model_response(clean_message)
68
-
69
- # Diviser la réponse si elle dépasse la limite Discord (2000 caractères)
70
- if len(response) > 2000:
71
- parts = [response[i:i+2000] for i in range(0, len(response), 2000)]
72
- for part in parts:
73
- await message.channel.send(part)
74
- else:
75
- await message.channel.send(response)
76
-
77
- except Exception as e:
78
- await message.channel.send(f"Une erreur est survenue : {str(e)}")
 
 
79
 
80
- # Lancer le bot
81
  if __name__ == "__main__":
82
- # Vérifier que les tokens sont définis
83
- if TOKEN == "VOTRE_TOKEN_DISCORD" or HF_TOKEN == "VOTRE_TOKEN_HF":
84
- print("Veuillez remplacer les tokens par vos propres tokens Discord et Hugging Face")
85
- else:
86
- discord_client.run(TOKEN)
 
 
 
3
from gradio_client import Client
from huggingface_hub import InferenceClient
import os
import logging
import gradio as gr
import threading

# Set up logging
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')

# Get tokens from environment variables (Hugging Face Spaces secrets)
TOKEN = os.getenv("DISCORD_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast at startup if either secret is missing.
if not TOKEN or not HF_TOKEN:
    raise ValueError("DISCORD_TOKEN and HF_TOKEN must be set as environment variables in Spaces secrets")

# Set up Discord intents; message_content is required to read message text.
intents = discord.Intents.default()
intents.message_content = True

# Initialize Discord client
client = discord.Client(intents=intents)

# Initialize Hugging Face Inference client
hf_client = InferenceClient(api_key=HF_TOKEN)
30
 
31
# Query the hosted Qwen model and stream its reply into one string.
async def get_ai_response(message_content):
    """Return the model's reply to *message_content*, or an error string.

    Streams chunks from the Inference API, skipping malformed ones, and
    never raises: any failure is logged and reported as text instead.
    """
    try:
        chat_history = [
            {"role": "system", "content": "tu es \"orion\" une ia crée par ethan "},
            {"role": "user", "content": message_content},
        ]
        stream = hf_client.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=chat_history,
            temperature=0.5,
            max_tokens=2048,
            top_p=0.7,
            stream=True,
        )
        pieces = []
        for chunk in stream:
            # Some chunks may lack choices/delta fields — skip them safely.
            try:
                piece = chunk.choices[0].delta.content
            except (AttributeError, IndexError) as exc:
                logging.warning(f"Skipping invalid chunk: {exc}")
                continue
            if piece is not None:  # only append when content exists
                pieces.append(piece)
        reply = "".join(pieces)
        return reply if reply else "I couldn't generate a response."
    except Exception as exc:
        logging.error(f"Error in get_ai_response: {exc}")
        return f"An error occurred: {str(exc)}"
57
 
58
@client.event
async def on_ready():
    # Fired once the gateway connection is established; announce login.
    logging.info(f'We have logged in as {client.user}')
61
 
62
@client.event
async def on_message(message):
    """Answer messages that mention the bot with the model's reply."""
    # Guard: never react to the bot's own messages.
    if message.author == client.user:
        return
    # Guard: only respond when the bot is explicitly mentioned.
    if client.user not in message.mentions:
        return

    # Strip the mention tag to get the actual prompt text.
    clean_message = message.content.replace(f"<@{client.user.id}>", "").strip()
    if not clean_message:
        await message.channel.send("Please provide some text for me to respond to!")
        return

    # Show a placeholder while the model generates its answer.
    processing_message = await message.channel.send("Processing your request...")
    response = await get_ai_response(clean_message)

    if len(response) <= 2000:
        await processing_message.edit(content=response)
        return

    # Discord caps messages at 2000 characters: drop the placeholder and
    # send the reply in consecutive slices instead.
    await processing_message.delete()
    for start in range(0, len(response), 2000):
        await message.channel.send(response[start:start + 2000])
80
 
81
@client.event
async def on_error(event, *args, **kwargs):
    # Global handler for exceptions raised inside other event handlers.
    # discord.py passes only the *name* of the failing event here; the
    # exception itself is the currently-active one, so use
    # logging.exception() to record the full traceback rather than just
    # the event name (logging.error discarded the actual error).
    logging.exception(f"An error occurred: {event}")
    # Also append a marker line to error.log, as before.
    with open('error.log', 'a') as f:
        f.write(f"{event}\n")
 
 
 
 
86
 
87
# Runs the Discord client; meant to be the target of a background thread.
def run_discord_bot():
    """Start the bot and record any startup failure instead of raising."""
    try:
        logging.info("Starting the Discord bot...")
        client.run(TOKEN)
    except Exception as exc:
        logging.error(f"Failed to start bot: {exc}")
        # Keep a persistent record so failures survive Space restarts.
        with open('error.log', 'a') as log_file:
            log_file.write(f"Failed to start bot: {exc}\n")
96
+
97
# Gradio interface to keep the Space alive
def create_interface():
    """Build a minimal Gradio page that displays the bot's invite link.

    Returns the gr.Blocks demo (launched by the __main__ block).
    """
    # BUGFIX: the original string literal was unterminated (missing the
    # closing quote), which made the whole file a SyntaxError. Split into
    # two implicitly-concatenated, properly closed literals.
    invite_url = (
        "Add this bot to your server by following this URL: "
        "https://discord.com/oauth2/authorize?client_id=1347943651363389440&permissions=515396454464&integration_type=0&scope=bot"
    )
    with gr.Blocks(title="Discord Bot Invite") as demo:
        gr.Markdown(f"# Discord Bot\n{invite_url}")
    return demo
103
 
 
104
if __name__ == "__main__":
    # Run the bot on a daemon thread so the Gradio server below can own
    # the main thread (and keep the Hugging Face Space process alive).
    worker = threading.Thread(target=run_discord_bot, daemon=True)
    worker.start()

    # Serve the invite page on the host/port Spaces expects.
    interface = create_interface()
    interface.launch(server_name="0.0.0.0", server_port=7860)