Hardik5456 committed
Commit 09d5523 · verified · Parent: 4a56061

Update app.py

Files changed (1):
1. app.py (+33 / -25)
app.py CHANGED
@@ -1,78 +1,86 @@
 import os
 import threading
 import asyncio
+import time
 import discord
 from dotenv import load_dotenv
 from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
 
-# Load environment variables (from Hugging Face Secrets and .env if available)
+# Load environment variables from Hugging Face Secrets (.env file is optional)
 load_dotenv()
 DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
 
 if not DISCORD_TOKEN:
     raise ValueError("Discord bot token is missing. Set DISCORD_TOKEN in the environment variables.")
 
-# Set model details
+# Model details: using the quantized Q8 version from bartowski's repository.
 MODEL_REPO = "bartowski/agentica-org_DeepScaleR-1.5B-Preview-GGUF"
 MODEL_FILENAME = "agentica-org_DeepScaleR-1.5B-Preview-Q8_0.gguf"
 MODEL_PATH = f"./{MODEL_FILENAME}"
 
-# If the model file doesn't exist locally, download it.
+# Download the model file if it does not exist locally.
 if not os.path.exists(MODEL_PATH):
-    print("Model file not found, downloading...")
+    print("Model file not found locally. Downloading now...")
     MODEL_PATH = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME)
     print(f"Model downloaded to {MODEL_PATH}")
+else:
+    print(f"Model found locally at {MODEL_PATH}")
 
 # Initialize the model using llama-cpp-python.
-# Adjust n_threads if needed.
+# Adjust n_threads based on available CPU cores.
+print("Initializing model...")
 llm = Llama(model_path=MODEL_PATH, n_threads=4)
+print("Model initialization complete.")
 
 # Define a function to generate a response.
 def generate_response(prompt):
-    output = llm(prompt=prompt, max_tokens=200, temperature=0.7, top_p=0.9, echo=False)
-    response = output["text"]
-    # Replace the internal model name with your bot's identity.
-    response = response.replace("DeepScaleR", "Shiv Yantra AI")
-    return response
+    try:
+        # Generate response using the quantized model.
+        output = llm(prompt=prompt, max_tokens=200, temperature=0.7, top_p=0.9, echo=False)
+        response = output["text"]
+        # Enforce your bot identity.
+        response = response.replace("DeepScaleR", "Shiv Yantra AI")
+        return response
+    except Exception as e:
+        print(f"Error in generate_response: {e}")
+        return "Error processing your request."
 
-# ---------------------------
+# ----------------------------
 # Discord Bot Setup
-# ---------------------------
+# ----------------------------
 intents = discord.Intents.default()
-intents.message_content = True  # Enable access to message content
+intents.message_content = True  # Ensure we can read message contents
 client = discord.Client(intents=intents)
 
 @client.event
 async def on_ready():
-    print(f"Logged in as {client.user}")
+    print(f"Discord bot logged in as {client.user}")
 
 @client.event
 async def on_message(message):
-    # Ignore messages from the bot itself.
+    # Skip bot's own messages.
     if message.author == client.user:
         return
-
     user_input = message.content.strip()
     if user_input:
         try:
-            # Run generate_response in a separate thread so as not to block the Discord loop.
+            # Run the generate_response in a separate thread to avoid blocking.
             ai_response = await asyncio.to_thread(generate_response, user_input)
         except Exception as e:
-            print(f"Error during generation: {e}")
+            print(f"Error during generation in on_message: {e}")
            ai_response = "Error processing your request."
         await message.channel.send(ai_response)
 
 def run_discord_bot():
     client.run(DISCORD_TOKEN)
 
-# ---------------------------
-# Start the Discord Bot
-# ---------------------------
+# ----------------------------
+# Start Services Concurrently
+# ----------------------------
 if __name__ == "__main__":
-    # Run the Discord bot on a separate thread.
+    print("Starting Discord bot...")
     threading.Thread(target=run_discord_bot, daemon=True).start()
-
-    # Keep the main thread alive.
+    print("Discord bot started. Keeping main thread alive.")
     while True:
-        pass
+        time.sleep(60)
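
Review note: both sides of this diff read the completion as output["text"], but llama-cpp-python's Llama.__call__ returns an OpenAI-style completion dict, so the generated text is nested under output["choices"][0]["text"]. As committed, generate_response raises a KeyError on every call, which the new try/except then reports as "Error processing your request." A minimal corrected sketch, keeping the commit's sampling parameters and identity rewrite:

    def generate_response(prompt):
        try:
            output = llm(prompt=prompt, max_tokens=200, temperature=0.7, top_p=0.9, echo=False)
            # llama-cpp-python nests the completion text under "choices".
            response = output["choices"][0]["text"]
            # Keep the commit's bot-identity substitution.
            return response.replace("DeepScaleR", "Shiv Yantra AI")
        except Exception as e:
            print(f"Error in generate_response: {e}")
            return "Error processing your request."

Separately, since discord.Client.run() is itself a blocking call, the daemon thread plus the time.sleep(60) keep-alive loop could be dropped by calling client.run(DISCORD_TOKEN) directly on the main thread; the threaded layout only pays off if the main thread also needs to host something else alongside the bot.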