Update main.py
main.py CHANGED
@@ -5,6 +5,8 @@ import os
 import asyncio
 import json
 from datetime import datetime
+import edge_tts
+import io
 
 # Add these constants at the top
 API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3-turbo"
@@ -14,11 +16,30 @@ AI_HEADERS = {
     "Accept": "application/json"
 }
 
+# TTS settings
+DEFAULT_VOICE = "en-IN-NeerjaNeural"
+DEFAULT_RATE = "+25%"
+
 # Initialize message history in memory only
 message_history = []
 
+async def text_to_speech(text, voice=DEFAULT_VOICE, rate=DEFAULT_RATE):
+    """Convert text to speech and return the audio data"""
+    try:
+        communicate = edge_tts.Communicate(text, voice, rate=rate)
+        audio_data = bytes()
+
+        async for chunk in communicate.stream():
+            if chunk["type"] == "audio":
+                audio_data += chunk["data"]
+
+        return audio_data
+    except Exception as e:
+        print(f"Error in text-to-speech conversion: {str(e)}")
+        return None
+
 # Create a new Client instance with a custom session name and no local storage
-
+myaibot = Client(
     "my_bot",
     api_id=API_ID,
     api_hash=API_HASH,
@@ -35,7 +56,7 @@ def save_message_to_history(user_id, username, message_type, content, bot_respon
     print("Message History Update:", json.dumps(message_data, indent=2))
 
 # Add command to print history
-@
+@myaibot.on_message(filters.command("history"))
 async def history_command(client, message):
     if not message_history:
         await message.reply_text("No message history available.")
@@ -56,12 +77,12 @@ async def history_command(client, message):
         await message.reply_text(f"Error retrieving history: {str(e)}")
 
 # Command handler for /start
-@
+@myaibot.on_message(filters.command("start"))
 async def start_command(client, message):
     await message.reply_text("Hello! I'm your Telegram bot. Nice to meet you!")
 
 # Command handler for /help
-@
+@myaibot.on_message(filters.command("help"))
 async def help_command(client, message):
     help_text = """
 Available commands:
@@ -71,7 +92,7 @@ Available commands:
     await message.reply_text(help_text)
 
 # Message handler for regular text messages
-@
+@myaibot.on_message(filters.text & filters.private & ~filters.command(["start", "help", "info"]))
 async def echo(client, message):
     try:
         thinking_msg = await message.reply_text("🤔 Thinking about your message...")
@@ -91,7 +112,7 @@ async def echo(client, message):
         await message.reply_text(f"Sorry, I couldn't process your message: {str(e)}")
 
 # Handle photo messages
-@
+@myaibot.on_message(filters.photo)
 async def handle_photo(client, message):
     response = "Nice photo!"
     await message.reply_text(response)
@@ -104,7 +125,7 @@ async def handle_photo(client, message):
     )
 
 # Handle sticker messages
-@
+@myaibot.on_message(filters.sticker)
 async def handle_sticker(client, message):
     response = "Cool sticker!"
     await message.reply_text(response)
@@ -117,7 +138,7 @@ async def handle_sticker(client, message):
     )
 
 # Custom command example
-@
+@myaibot.on_message(filters.command("info"))
 async def info_command(client, message):
     user = message.from_user
     info_text = f"""
@@ -133,7 +154,7 @@ async def transcribe_audio(file_path):
     try:
         with open(file_path, "rb") as f:
             data = f.read()
-        response = requests.post(API_URL,
+        response = requests.post(API_URL,data=data)
         return response.json().get('text', 'Could not transcribe audio')
     except Exception as e:
         print(f"Error in transcription: {e}")
@@ -207,37 +228,39 @@ async def get_ai_response(text):
         print(f"Error getting AI response: {str(e)}\nFull error: {repr(e)}")
         return "Sorry, I couldn't process your message."
 
-# Update the voice message handler with
-@
+# Update the voice message handler with TTS response
+@myaibot.on_message(filters.voice | filters.audio)
 async def handle_voice(client, message):
     try:
-        # Send a processing message
         processing_msg = await message.reply_text("🎵 Processing your voice message...")
 
-        # Download
-
-        for attempt in range(max_retries):
-            try:
-                message = await app.get_messages(
-                    message.chat.id,
-                    message.id
-                )
-                voice_file = await message.download()
-                break
-            except Exception as e:
-                if attempt == max_retries - 1:
-                    raise e
-                await asyncio.sleep(1)
-
+        # Download and process voice message
+        voice_file = await message.download()
         transcription = await transcribe_audio(voice_file)
         await message.reply_text(f"🗣️ Transcription:\n\n{transcription}")
 
+        # Get AI response
         thinking_msg = await message.reply_text("🤔 Thinking about your message...")
        ai_response = await get_ai_response(transcription)
         await thinking_msg.delete()
         await message.reply_text(ai_response)
 
-        #
+        # Convert AI response to speech
+        processing_tts = await message.reply_text("🔊 Converting response to speech...")
+        audio_data = await text_to_speech(ai_response)
+
+        if audio_data:
+            # Create an in-memory file-like object
+            audio_file = io.BytesIO(audio_data)
+            audio_file.name = "response.mp3"
+
+            # Send audio directly from memory
+            await message.reply_voice(
+                audio_file,
+                caption="🎵 Voice response"
+            )
+
+        # Save to history
         save_message_to_history(
             message.from_user.id,
            message.from_user.username,
@@ -248,8 +271,9 @@ async def handle_voice(client, message):
 
         # Clean up
         try:
-            os.remove(voice_file)
+            os.remove(voice_file)  # Still need to remove the downloaded voice file
             await processing_msg.delete()
+            await processing_tts.delete()
         except:
             pass
 
@@ -261,4 +285,4 @@ async def handle_voice(client, message):
 # Run the bot
 if __name__ == "__main__":
     print("Bot is running...")
-
+    myaibot.run()
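
For readers who want to try the new TTS path on its own, here is a minimal standalone sketch (not part of the commit) that exercises the same edge_tts streaming call the added text_to_speech() helper uses, and writes the MP3 bytes to disk instead of sending them to Telegram. It assumes edge-tts is installed (pip install edge-tts); the voice and rate values mirror DEFAULT_VOICE and DEFAULT_RATE from the diff, and the output filename is arbitrary.

import asyncio
import edge_tts

VOICE = "en-IN-NeerjaNeural"   # mirrors DEFAULT_VOICE in the commit
RATE = "+25%"                  # mirrors DEFAULT_RATE in the commit

async def demo(text: str, out_path: str = "tts_check.mp3") -> None:
    # Stream the synthesized speech; chunks are dicts of type "audio" or "WordBoundary"
    communicate = edge_tts.Communicate(text, VOICE, rate=RATE)
    audio = bytearray()
    async for chunk in communicate.stream():
        if chunk["type"] == "audio":
            audio += chunk["data"]
    # Write the MP3 bytes to disk for a quick listen
    with open(out_path, "wb") as f:
        f.write(audio)
    print(f"Wrote {len(audio)} bytes to {out_path}")

if __name__ == "__main__":
    asyncio.run(demo("Hello from the bot's new voice pipeline."))

Note the design choice in the handler itself: the synthesized reply is kept in memory as an io.BytesIO object (given a .name so it is sent as "response.mp3") rather than written to disk, so the cleanup block only has to os.remove() the downloaded voice file.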