Hardik5456 committed on
Commit cb35dc2 · verified · 1 Parent(s): 24e8003

Update app.py

Files changed (1): app.py +69 -25
app.py CHANGED
@@ -1,49 +1,93 @@
  import os
+ import threading
  import discord
+ import requests
  import torch
+ import gradio as gr
  from transformers import AutoModelForCausalLM, AutoTokenizer
- from huggingface_hub import login
- from dotenv import load_dotenv
 
- # Load environment variables
- load_dotenv()
- HF_TOKEN = os.getenv("HF_TOKEN")
+ # Load tokens from environment variables
  DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
- 
- if not HF_TOKEN:
-     raise ValueError("Hugging Face token is missing. Set HF_TOKEN in the environment variables.")
+ HF_TOKEN = os.getenv("HF_TOKEN")  # Optional: only needed if your model is private
 
  if not DISCORD_TOKEN:
      raise ValueError("Discord bot token is missing. Set DISCORD_TOKEN in the environment variables.")
 
- # Authenticate with Hugging Face
- login(HF_TOKEN)
- 
- # Load DeepScaleR model and tokenizer
- MODEL_NAME = "Your_HuggingFace_Repo/DeepScaleR"  # Replace with your model repo name
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16).to("cuda" if torch.cuda.is_available() else "cpu")
+ # Use the official DeepScaleR model repository
+ MODEL_NAME = "agentica-org/DeepScaleR-1.5B-Preview"
+ 
+ # Load the model and tokenizer (pass the token only when one is provided)
+ if HF_TOKEN:
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
+     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN, torch_dtype=torch.float16, device_map="auto")
+ else:
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
 
- # Initialize Discord bot
+ # Generate a response using the model
+ def generate_response(prompt):
+     # device_map="auto" chooses the placement, so send inputs to the model's device
+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to(model.device)
+     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, top_p=0.9, temperature=0.7)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     # Ensure the bot identifies as "Shiv Yantra AI"
+     response = response.replace("DeepScaleR", "Shiv Yantra AI")
+     return response
+ 
+ # ==========================
+ # Gradio API Setup
+ # ==========================
+ def gradio_api(input_text):
+     return generate_response(input_text)
+ 
+ iface = gr.Interface(fn=gradio_api, inputs="text", outputs="text")
+ 
+ def run_gradio():
+     iface.launch(server_name="0.0.0.0", server_port=7860, share=False)
+ 
+ # ==========================
+ # Discord Bot Setup
+ # ==========================
  intents = discord.Intents.default()
- intents.messages = True
+ intents.message_content = True  # Required for reading message content
  client = discord.Client(intents=intents)
 
+ # Call the co-located Gradio app over loopback (0.0.0.0 is a bind address, not a request target)
+ GRADIO_API_URL = "http://127.0.0.1:7860/run/predict"
+ 
  @client.event
  async def on_ready():
-     print(f'Logged in as {client.user}')
+     print(f"Logged in as {client.user}")
 
  @client.event
  async def on_message(message):
      if message.author == client.user:
-         return
+         return  # Avoid replying to self
 
-     input_text = message.content
-     inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
-     output = model.generate(**inputs, max_length=200)
-     response = tokenizer.decode(output[0], skip_special_tokens=True)
- 
-     await message.channel.send(response)
+     user_input = message.content.strip()
+     if user_input:
+         try:
+             payload = {"data": [user_input]}
+             r = requests.post(GRADIO_API_URL, json=payload)
+             r.raise_for_status()
+             response_json = r.json()
+             ai_response = response_json.get("data", ["Sorry, something went wrong."])[0]
+         except Exception:
+             ai_response = "Error contacting the AI API."
+         await message.channel.send(ai_response)
 
- # Run Discord bot
- client.run(DISCORD_TOKEN)
+ def run_discord_bot():
+     client.run(DISCORD_TOKEN)
+ 
+ # ==========================
+ # Start Both Services
+ # ==========================
+ if __name__ == "__main__":
+     # Start Gradio in a separate daemon thread
+     threading.Thread(target=run_gradio, daemon=True).start()
+     # Start the Discord bot in another daemon thread
+     threading.Thread(target=run_discord_bot, daemon=True).start()
+ 
+     # Keep the main thread alive without busy-waiting
+     # (a bare "while True: pass" would spin a full CPU core)
+     threading.Event().wait()
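
To sanity-check the Gradio endpoint the bot depends on, you can post the same {"data": [...]} payload the handler builds. A minimal sketch, assuming the Gradio 3.x REST route /run/predict that this commit targets; newer Gradio releases serve the equivalent call at /api/predict or via the gradio_client package:

    import requests

    # Payload shape mirrors the on_message handler: inputs travel in a "data" list.
    payload = {"data": ["What is 17 * 23?"]}
    r = requests.post("http://127.0.0.1:7860/run/predict", json=payload, timeout=120)
    r.raise_for_status()
    print(r.json()["data"][0])  # first output component holds the generated text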
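
One caveat the commit does not address: requests.post is a blocking call inside the async on_message handler, so a slow generation stalls discord.py's event loop and its heartbeat. A possible follow-up, not part of this commit, is to move the HTTP round-trip onto a worker thread with asyncio.to_thread (Python 3.9+); fetch_ai_response below is a hypothetical helper name:

    import asyncio
    import requests

    async def fetch_ai_response(url: str, user_input: str) -> str:
        # Run the blocking HTTP call in a worker thread so the event loop stays responsive.
        def post() -> str:
            resp = requests.post(url, json={"data": [user_input]}, timeout=120)
            resp.raise_for_status()
            return resp.json()["data"][0]
        return await asyncio.to_thread(post)

    # Inside on_message: ai_response = await fetch_ai_response(GRADIO_API_URL, user_input)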