File size: 4,081 Bytes
397b7de
 
 
 
 
9f1220f
 
 
 
 
 
397b7de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a7ab984
397b7de
 
 
 
 
 
 
 
 
 
7f4b3a6
 
 
 
 
 
 
 
397b7de
 
7f4b3a6
397b7de
 
 
 
9f1220f
397b7de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f1220f
397b7de
 
 
9f1220f
 
397b7de
9f1220f
397b7de
 
9f1220f
397b7de
 
 
9f1220f
 
 
 
 
 
 
397b7de
9f1220f
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# bot.py
import asyncio
import logging
import os
import threading
import traceback

import discord
import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient

# Set up logging for the whole process (bot thread and Gradio thread).
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')

# Get tokens from environment variables (Hugging Face Spaces secrets)
TOKEN = os.getenv("DISCORD_TOKEN")  # Discord bot token
HF_TOKEN = os.getenv("HF_TOKEN")    # Hugging Face Inference API token

# Fail fast at import time if either credential is missing.
if not TOKEN or not HF_TOKEN:
    raise ValueError("DISCORD_TOKEN and HF_TOKEN must be set as environment variables in Spaces secrets")

# Set up Discord intents; message_content is required so on_message can
# read message.content (a privileged intent).
intents = discord.Intents.default()
intents.message_content = True

# Initialize Discord client
client = discord.Client(intents=intents)

# Initialize Hugging Face Inference client (used for chat completions)
hf_client = InferenceClient(api_key=HF_TOKEN)

# Function to process message and get response
async def get_ai_response(message_content):
    """Generate a chat reply for *message_content* via the HF Inference API.

    The streaming completion call is blocking, so it is executed in a
    worker thread with ``asyncio.to_thread`` — running it inline would
    stall the Discord event loop (and its gateway heartbeat) for the
    whole generation.

    Returns the assembled response text, a fallback string when the
    model produced no content, or an error description on failure.
    """
    def _stream_completion():
        # Blocking network call — runs off the event loop (see above).
        messages = [{ "role": "system", "content": "tu es \"orion\" une ia crée par ethan " },{"role": "user", "content": message_content}]
        response = ""
        stream = hf_client.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=2048,
            top_p=0.7,
            stream=True
        )
        for chunk in stream:
            # Safely handle the chunk content
            try:
                delta_content = chunk.choices[0].delta.content
            except (AttributeError, IndexError) as e:
                logging.warning(f"Skipping invalid chunk: {e}")
                continue
            if delta_content is not None:  # Only append if content exists
                response += delta_content
        return response

    try:
        response = await asyncio.to_thread(_stream_completion)
        return response if response else "I couldn't generate a response."
    except Exception as e:
        logging.error(f"Error in get_ai_response: {e}")
        return f"An error occurred: {str(e)}"

@client.event
async def on_ready():
    """Log a confirmation once the bot is connected and logged in."""
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logging.info('We have logged in as %s', client.user)

@client.event
async def on_message(message):
    """Reply with an AI-generated answer when the bot is mentioned.

    Ignores the bot's own messages, strips the mention from the prompt,
    and splits replies longer than Discord's 2000-character limit into
    multiple messages.
    """
    # Never respond to ourselves (prevents reply loops).
    if message.author == client.user:
        return
    if client.user not in message.mentions:
        return
    # Strip both mention forms: <@id> and the legacy nickname form <@!id>.
    clean_message = (
        message.content
        .replace(f"<@{client.user.id}>", "")
        .replace(f"<@!{client.user.id}>", "")
        .strip()
    )
    if not clean_message:
        await message.channel.send("Please provide some text for me to respond to!")
        return
    processing_message = await message.channel.send("Processing your request...")
    response = await get_ai_response(clean_message)
    if len(response) > 2000:
        # Discord caps a message at 2000 characters; send in chunks.
        chunks = [response[i:i+2000] for i in range(0, len(response), 2000)]
        await processing_message.delete()
        for chunk in chunks:
            await message.channel.send(chunk)
    else:
        await processing_message.edit(content=response)

@client.event
async def on_error(event, *args, **kwargs):
    """Log unhandled exceptions raised inside event handlers.

    discord.py calls this while the exception is still active, so
    ``exc_info=True`` / ``traceback.format_exc()`` capture the real
    traceback instead of just the event name.
    """
    logging.error("An error occurred: %s", event, exc_info=True)
    with open('error.log', 'a') as f:
        f.write(f"{event}\n{traceback.format_exc()}\n")

# Function to run the Discord bot in a separate thread
def run_discord_bot():
    """Start the Discord client; blocks until the bot disconnects.

    Intended to run on a daemon thread (see ``__main__``) so the main
    thread can serve the Gradio interface that keeps the Space alive.
    """
    try:
        logging.info("Starting the Discord bot...")
        client.run(TOKEN)
    except Exception as e:
        # logging.exception records the full traceback, not just the message.
        logging.exception("Failed to start bot")
        with open('error.log', 'a') as f:
            f.write(f"Failed to start bot: {e}\n{traceback.format_exc()}\n")

# Gradio interface to keep the Space alive
def create_interface():
    """Build the minimal Gradio page served by the Space.

    The page only shows the bot's OAuth2 invite link; its real purpose
    is to give the container a web server so the Space stays running.
    """
    oauth_link = (
        "https://discord.com/oauth2/authorize?client_id=1347942347077582880"
        "&permissions=377957238784&integration_type=0&scope=bot"
    )
    page_text = (
        "# Discord Bot\n"
        f"Add this bot to your server by following this URL: {oauth_link}"
    )
    with gr.Blocks(title="Discord Bot Invite") as demo:
        gr.Markdown(page_text)
    return demo

if __name__ == "__main__":
    # Start the Discord bot in a separate thread.
    # daemon=True so the thread does not block process exit when Gradio stops.
    bot_thread = threading.Thread(target=run_discord_bot, daemon=True)
    bot_thread.start()

    # Launch the Gradio interface on the main thread (port 7860 is the
    # port Hugging Face Spaces exposes); this call blocks and keeps the
    # process — and therefore the bot thread — alive.
    interface = create_interface()
    interface.launch(server_name="0.0.0.0", server_port=7860)