"""Discord bot that answers messages with a Hugging Face causal language model.

Reads HF_TOKEN and DISCORD_TOKEN from the environment (via .env), loads the
model once at startup, and replies to every non-bot message with generated text.
"""

import asyncio
import os

import discord
import torch
from dotenv import load_dotenv
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load environment variables from a local .env file, if present.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")

if not HF_TOKEN:
    raise ValueError("Hugging Face token is missing. Set HF_TOKEN in the environment variables.")
if not DISCORD_TOKEN:
    raise ValueError("Discord bot token is missing. Set DISCORD_TOKEN in the environment variables.")

# Authenticate with Hugging Face (required for private/gated model repos).
login(HF_TOKEN)

# Load DeepScaleR model and tokenizer once at startup.
MODEL_NAME = "Your_HuggingFace_Repo/DeepScaleR"  # Replace with your model repo name
MAX_NEW_TOKENS = 200  # cap on generated tokens per reply (excludes the prompt)
DISCORD_MESSAGE_LIMIT = 2000  # Discord rejects messages longer than this

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16).to(
    "cuda" if torch.cuda.is_available() else "cpu"
)


def _generate_reply(prompt: str) -> str:
    """Run blocking model inference for *prompt* and return only the new text.

    Slices the echoed prompt tokens off the output so the bot does not repeat
    the user's message back at them.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.inference_mode():
        # max_new_tokens (not max_length) so the prompt length doesn't eat
        # into the generation budget.
        output = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS)
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


# Initialize Discord bot.
intents = discord.Intents.default()
intents.messages = True
# Required since discord.py 2.0: without this privileged intent,
# message.content is empty for most messages.
intents.message_content = True
client = discord.Client(intents=intents)


@client.event
async def on_ready():
    print(f"Logged in as {client.user}")


@client.event
async def on_message(message):
    # Ignore our own messages to avoid replying to ourselves in a loop.
    if message.author == client.user:
        return

    input_text = message.content
    if not input_text.strip():
        return  # nothing to respond to (e.g. attachment-only message)

    # Run the blocking generate() off the event loop so the gateway
    # heartbeat isn't stalled during inference.
    response = await asyncio.to_thread(_generate_reply, input_text)

    if not response:
        return
    # Discord rejects messages over 2000 characters; truncate defensively.
    await message.channel.send(response[:DISCORD_MESSAGE_LIMIT])


# Run Discord bot (blocks until the process is stopped).
client.run(DISCORD_TOKEN)