import discord
from discord import app_commands
import os
import asyncio
import aiohttp  # aiohttp for asynchronous HTTP requests
import base64
import gradio as gr
import google.generativeai as genai
# --- Environment Variables & Setup ---
DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

if not DISCORD_BOT_TOKEN or not GEMINI_API_KEY:
    raise ValueError("Both DISCORD_BOT_TOKEN and GEMINI_API_KEY must be set.")

# --- Discord Bot Setup ---
intents = discord.Intents.default()
client = discord.Client(intents=intents)
tree = app_commands.CommandTree(client)

# --- Gemini Setup ---
genai.configure(api_key=GEMINI_API_KEY)
model = genai.GenerativeModel("gemini-2.0-flash-exp")

@tree.command(name="hello", description="Says hello!")
async def hello_command(interaction: discord.Interaction):
    await interaction.response.send_message("Hello there!")

@tree.command(name="gemini", description="Chat with Gemini")
async def generate_command(
    interaction: discord.Interaction,
    prompt: str,
):
    try:
        # Defer the interaction to allow time for processing
        await interaction.response.defer()

        content = []  # Parts (text and image data) passed to the model

        # Handle attachments if the command was invoked in the context of a replied-to message.
        # Slash-command interactions usually carry no message, so guard against None.
        if interaction.message and interaction.message.reference:
            ref_message = await interaction.channel.fetch_message(interaction.message.reference.message_id)

            if ref_message.attachments:  # Check if the replied-to message has attachments
                for attachment in ref_message.attachments:
                    if not attachment.content_type or not attachment.content_type.startswith("image/"):
                        await interaction.followup.send(f"The attachment {attachment.filename} is not an image.", ephemeral=True)
                        continue

                    # Download each attachment
                    async with aiohttp.ClientSession() as session:
                        async with session.get(attachment.url) as resp:
                            if resp.status == 200:
                                image_bytes = await resp.read()
                                base64_image = base64.b64encode(image_bytes).decode("utf-8")
                                content.append({"mime_type": "image/jpeg", "data": base64_image})
                            else:
                                await interaction.followup.send(
                                    f"Failed to download image: {attachment.filename}, status code: {resp.status}"
                                )
                                return

        # Add the text prompt to the content
        content.append(prompt)

        # Call the Generative AI model
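        # Note: generate_content is a synchronous call; on a busy bot it could be
        # offloaded with asyncio.to_thread so it does not block the event loop.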
        response = model.generate_content(content, stream=True)

        current_message = None
        current_message_content = ""

        # Process streaming responses
        for part in response:
            text_chunk = part.text

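            # Discord caps messages at 2000 characters, so the streamed text is
            # split across follow-up messages once the current one would overflow.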
            if len(current_message_content) + len(text_chunk) <= 2000:
                current_message_content += text_chunk
                if current_message:
                    await current_message.edit(content=current_message_content)
                else:
                    current_message = await interaction.followup.send(content=current_message_content)
            else:
                if current_message:
                    await current_message.edit(content=current_message_content)
                else:
                    await interaction.followup.send(content=current_message_content)
                current_message_content = text_chunk
                current_message = await interaction.followup.send(content=current_message_content)

        if current_message_content:
            if current_message:
                await current_message.edit(content=current_message_content)
            else:
                await interaction.followup.send(content=current_message_content)

    except Exception as e:
        print(e)
        await interaction.followup.send(f"An error occurred: {e}")

@tree.command(name="discribe", description="Process an image from a message")
async def process_image(interaction: discord.Interaction):
    try:
        # Ensure the command is replying to a message
        if not interaction.message or not interaction.message.reference:
            await interaction.response.send_message("Please reply to a message with an image to use this command.", ephemeral=True)
            return

        # Fetch the original message being replied to
        ref_message = await interaction.channel.fetch_message(interaction.message.reference.message_id)

        # Check if the original message has attachments
        if not ref_message.attachments:
            await interaction.response.send_message("The replied-to message does not contain any attachments.", ephemeral=True)
            return

        # Process the first attachment (assuming it's an image)
        attachment = ref_message.attachments[0]
        if not attachment.content_type.startswith("image/"):
            await interaction.response.send_message("The attachment is not an image.", ephemeral=True)
            return

        # Defer the response to allow time for processing
        await interaction.response.defer()

        # Download the image, then pass it to the model for a description
        async with aiohttp.ClientSession() as session:
            async with session.get(attachment.url) as resp:
                if resp.status == 200:
                    image_bytes = await resp.read()
                    base64_image = base64.b64encode(image_bytes).decode("utf-8")
                else:
                    await interaction.followup.send(f"Failed to download the image: {attachment.filename}")
                    return

        # Same content format as the /gemini command: a text part plus an inline image part
        response = model.generate_content(
            ["Describe this image.", {"mime_type": attachment.content_type, "data": base64_image}]
        )
        await interaction.followup.send(response.text[:2000])

    except Exception as e:
        print(f"An error occurred: {e}")
        await interaction.followup.send(f"An error occurred: {e}")


@client.event
async def on_ready():
    # Sync the application command tree so the slash commands are registered with Discord
    await tree.sync()
    print("Bot is ready!")

# --- Gradio Interface ---
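# A minimal echo app; likely present just to expose an HTTP endpoint on port 7860
# so the hosting platform keeps the process alive while the Discord bot runs.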
def echo_text(text):
    return text


def run_gradio():
    gr.Interface(
        fn=echo_text,
        inputs="text",
        outputs="text",
        live=False,
        title="Minimal Gradio Interface",
    ).launch(server_name="0.0.0.0", server_port=7860, share=False, show_error=True)

async def main():
    bot_task = asyncio.create_task(client.start(DISCORD_BOT_TOKEN))
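    # gr.Interface.launch() blocks its calling thread, so Gradio runs in a worker
    # thread while the Discord client keeps the asyncio event loop.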
    gradio_task = asyncio.to_thread(run_gradio)

    await asyncio.gather(bot_task, gradio_task)


if __name__ == "__main__":
    asyncio.run(main())