import asyncio
import os
import threading
import discord
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from dotenv import load_dotenv

# Load environment variables (from Hugging Face Secrets and .env if available)
load_dotenv()
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")  # Optional: only if needed for private models

if not DISCORD_TOKEN:
    raise ValueError("Discord bot token is missing. Set DISCORD_TOKEN in the environment variables.")

# Set the model repository name (public model)
MODEL_NAME = "agentica-org/DeepScaleR-1.5B-Preview"
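# At ~1.5B parameters, the fp16 weights alone need roughly 3 GB of memory
# (1.5e9 params * 2 bytes), so plan hardware accordingly.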

# Load the tokenizer and model. token=None (i.e., HF_TOKEN unset) falls back
# to anonymous access, so a single code path covers both cases.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, token=HF_TOKEN, torch_dtype=torch.float16, device_map="auto"
)

# Function to generate AI responses
def generate_response(prompt):
    # With device_map="auto" the model chooses its own device(s); send the
    # inputs to wherever the first model shard lives
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, top_p=0.9, temperature=0.7)
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    # Ensure the bot always identifies as "Shiv Yantra AI"
    response = response.replace("DeepScaleR", "Shiv Yantra AI")
    return response
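# Illustrative call (output varies from run to run because do_sample=True):
#   generate_response("Explain the Pythagorean theorem in one sentence")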

# ==========================
# Gradio Interface (optional UI)
# ==========================
def gradio_api(input_text):
    return generate_response(input_text)

iface = gr.Interface(fn=gradio_api, inputs="text", outputs="text", title="Shiv Yantra AI")

def run_gradio():
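    # Hugging Face Spaces expect the web app to listen on port 7860,
    # which is why server_port is pinned here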
    iface.launch(server_name="0.0.0.0", server_port=7860, share=False)

# ==========================
# Discord Bot Setup (Directly uses local generate_response)
# ==========================
intents = discord.Intents.default()
intents.message_content = True  # Required for reading message content
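# NOTE: the "Message Content Intent" must also be enabled for this bot in the
# Discord Developer Portal, otherwise message.content arrives empty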
client = discord.Client(intents=intents)

@client.event
async def on_ready():
    print(f"Logged in as {client.user}")

@client.event
async def on_message(message):
    # Avoid replying to itself
    if message.author == client.user:
        return

    user_input = message.content.strip()
    if user_input:
        try:
            # Run the blocking model call in a worker thread so the
            # Discord event loop (and its heartbeat) is not starved
            ai_response = await asyncio.to_thread(generate_response, user_input)
        except Exception as e:
            print(f"Error while generating a response: {e}")
            ai_response = "Error processing your request."
        # Discord rejects messages longer than 2,000 characters
        await message.channel.send(ai_response[:2000])

def run_discord_bot():
    client.run(DISCORD_TOKEN)

# ==========================
# Start Both Services Concurrently
# ==========================
if __name__ == "__main__":
    # Start the Gradio interface in a separate thread (optional UI)
    threading.Thread(target=run_gradio, daemon=True).start()
    # Run the Discord bot on the main thread: client.run() blocks until
    # shutdown, so it keeps the process alive without a busy-wait loop
    # (while True: pass) that would otherwise pin a CPU core
    run_discord_bot()