import os
import threading
import asyncio
import time
import discord
from dotenv import load_dotenv
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# Load environment variables from Hugging Face Secrets and .env (if available)
load_dotenv()
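# Expected .env contents (illustrative; the value below is a placeholder):
#   DISCORD_TOKEN=your-bot-token-here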
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
if not DISCORD_TOKEN:
    raise ValueError("Discord bot token is missing. Set DISCORD_TOKEN in the environment variables.")
# Model details: Using the Q6_K_L variant (recommended) from bartowski's GGUF collection.
MODEL_REPO = "bartowski/agentica-org_DeepScaleR-1.5B-Preview-GGUF"
MODEL_FILENAME = "agentica-org_DeepScaleR-1.5B-Preview-Q6_K_L.gguf"
MODEL_PATH = f"./{MODEL_FILENAME}"
# Download the model file if it doesn't exist locally.
if not os.path.exists(MODEL_PATH):
    print("Model file not found locally. Downloading now...")
    MODEL_PATH = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME)
    print(f"Model downloaded to {MODEL_PATH}")
else:
    print(f"Model found locally at {MODEL_PATH}")
# Initialize the model using llama-cpp-python.
# Adjust n_threads based on your available CPU cores.
print("Initializing model...")
llm = Llama(model_path=MODEL_PATH, n_threads=4)
print("Model initialization complete.")
# A lock to serialize model calls: llama-cpp-python's Llama object is not
# thread-safe, and on_message may invoke generation from multiple threads.
llm_lock = threading.Lock()
# Define a function to generate responses using the model.
def generate_response(prompt):
    try:
        # Call the model: adjust max_tokens, temperature, and top_p as needed.
        with llm_lock:
            output = llm(prompt=prompt, max_tokens=200, temperature=0.7, top_p=0.9, echo=False)
        # llama-cpp-python returns an OpenAI-style completion dict; the
        # generated text lives under output["choices"][0]["text"].
        response = output["choices"][0]["text"].strip()
        # Replace any instance of the internal model name with your bot's identity.
        response = response.replace("DeepScaleR", "Shiv Yantra AI")
        return response
    except Exception as e:
        print(f"Error in generate_response: {e}")
        return "Error processing your request."
# ----------------------------
# Discord Bot Setup
# ----------------------------
intents = discord.Intents.default()
intents.message_content = True # Required to read message contents
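# Note: message_content is a privileged intent; it must also be enabled for
# this bot in the Discord Developer Portal, or message.content will be empty.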
client = discord.Client(intents=intents)
@client.event
async def on_ready():
    print(f"Discord bot logged in as {client.user}")
@client.event
async def on_message(message):
    # Avoid replying to itself.
    if message.author == client.user:
        return
    user_input = message.content.strip()
    if user_input:
        try:
            # Run the synchronous generate_response function in a separate
            # thread so the model call does not block the Discord event loop.
            ai_response = await asyncio.to_thread(generate_response, user_input)
        except Exception as e:
            print(f"Error during generation in on_message: {e}")
            ai_response = "Error processing your request."
        # Discord rejects messages over 2000 characters; send long replies in chunks.
        for i in range(0, len(ai_response), 2000):
            await message.channel.send(ai_response[i:i + 2000])
def run_discord_bot():
    client.run(DISCORD_TOKEN)
# ----------------------------
# Start Services Concurrently
# ----------------------------
if __name__ == "__main__":
    print("Starting Discord bot...")
    threading.Thread(target=run_discord_bot, daemon=True).start()
    print("Discord bot started. Keeping main thread alive.")
    # Use a sleep loop to avoid busy-waiting.
    while True:
        time.sleep(60)