# NOTE: This file was extracted from a Hugging Face Space whose status page
# reported "Runtime error"; the code below is the Space's app script.
# Standard library
import os
import threading
import asyncio
import time

# Third party: Discord client, .env loading, model download, GGUF inference
import discord
from dotenv import load_dotenv
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# Load environment variables (from Hugging Face Secrets and .env if available).
# load_dotenv() must run before os.getenv() below so a local .env is honored.
load_dotenv()

# Fail fast at startup if the bot token is absent — the process is useless
# without it and a late failure inside client.run() is harder to diagnose.
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
if not DISCORD_TOKEN:
    raise ValueError("Discord bot token is missing. Set DISCORD_TOKEN in the environment variables.")
# Model details: Using the Q6_K_L variant from bartowski's GGUF collection.
MODEL_REPO = "bartowski/agentica-org_DeepScaleR-1.5B-Preview-GGUF"
MODEL_FILENAME = "agentica-org_DeepScaleR-1.5B-Preview-Q6_K_L.gguf"
MODEL_PATH = f"./{MODEL_FILENAME}"

# Download the model file if it doesn't exist locally.
# NOTE(review): hf_hub_download returns a path inside the HF cache directory,
# not ./MODEL_FILENAME, so this existence check will not find the file on the
# next start and the download branch runs again (served from cache, but the
# check is misleading) — confirm whether a local copy was intended.
if not os.path.exists(MODEL_PATH):
    print("Model file not found locally. Downloading now...")
    MODEL_PATH = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME)
    print(f"Model downloaded to {MODEL_PATH}")
else:
    print(f"Model found locally at {MODEL_PATH}")

# Initialize the model using llama-cpp-python. This loads the whole GGUF
# into memory and can take a while; done once at module import.
print("Initializing model...")
llm = Llama(model_path=MODEL_PATH, n_threads=4)
print("Model initialization complete.")
# Define a function to generate responses using the model.
def generate_response(prompt):
    """Generate a reply for *prompt* using the local GGUF model.

    Returns the generated text with the internal model name replaced by the
    bot's public identity, or a generic error message if generation fails.
    """
    try:
        # Reduce max_tokens to 50 to speed up generation.
        output = llm(prompt=prompt, max_tokens=50, temperature=0.7, top_p=0.9, echo=False)
        # BUG FIX: llama-cpp-python returns an OpenAI-style completion dict;
        # the generated text is at choices[0]["text"], not at the top level.
        # output["text"] raised KeyError on every call, so the bot always
        # answered with the error fallback.
        response = output["choices"][0]["text"]
        # Enforce bot identity: replace internal model name with "Shiv Yantra AI"
        return response.replace("DeepScaleR", "Shiv Yantra AI")
    except Exception as e:
        # Broad catch is deliberate: one failed generation must not kill the
        # bot; log the cause and return a safe fallback message.
        print(f"Error in generate_response: {e}")
        return "Error processing your request."
# ----------------------------
# Discord Bot Setup
# ----------------------------
# NOTE: message_content is a privileged intent — it must also be enabled for
# this bot in the Discord Developer Portal, or message.content arrives empty.
intents = discord.Intents.default()
intents.message_content = True  # Required to read message content
client = discord.Client(intents=intents)
@client.event
async def on_ready():
    """Log the bot identity once the gateway connection is established.

    BUG FIX: the coroutine was defined but never registered with the client,
    so it never fired; @client.event wires it to the READY event.
    """
    print(f"Discord bot logged in as {client.user}")
@client.event
async def on_message(message):
    """Reply to each non-bot, non-empty message with a model-generated answer.

    BUG FIX: the handler was defined but never registered with the client
    (missing @client.event), so the bot never responded to any message.
    """
    if message.author == client.user:
        return  # Skip messages from the bot itself
    user_input = message.content.strip()
    if not user_input:
        return  # Nothing to answer (e.g. attachment-only message)
    try:
        # Run generate_response in a worker thread so the blocking llama.cpp
        # call does not stall the event loop (and the gateway heartbeat).
        ai_response = await asyncio.to_thread(generate_response, user_input)
    except Exception as e:
        print(f"Error during generation in on_message: {e}")
        ai_response = "Error processing your request."
    # Discord rejects messages longer than 2000 characters; truncate to be safe.
    await message.channel.send(ai_response[:2000])
def run_discord_bot():
    """Blocking entry point: connect to Discord and run until shutdown."""
    client.run(DISCORD_TOKEN)
# ----------------------------
# Start Services Concurrently
# ----------------------------
if __name__ == "__main__":
    print("Starting Discord bot...")
    bot_thread = threading.Thread(target=run_discord_bot, daemon=True)
    bot_thread.start()
    print("Discord bot started. Keeping main thread alive.")
    # The bot lives on a daemon thread; park the main thread in one-minute
    # sleeps (not a busy-wait) so the process stays alive.
    while True:
        time.sleep(60)