import io
import os
import sys
import time
import secrets
import asyncio
import threading
import traceback

import gradio as gr
from io import BytesIO
from fusion import Fusion
from datetime import datetime
from telethon.tl.tlobject import TLObject
from telethon import TelegramClient, events, Button, types

API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
BOT_TOKEN = os.environ.get("BOT_TOKEN")

client = TelegramClient('session_name', API_ID, API_HASH)


def utc_to_local(utc_datetime):
    # Convert a UTC datetime to local time using the current UTC offset
    now_timestamp = time.time()
    offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(
        now_timestamp
    )
    return utc_datetime + offset


def yaml_format(obj, indent=0, max_str_len=256, max_byte_len=64):
    # Pretty-print a TLObject (or plain value) as a YAML-like string
    result = []
    if isinstance(obj, TLObject):
        obj = obj.to_dict()

    if isinstance(obj, dict):
        if not obj:
            return "dict:"
        items = obj.items()
        has_items = len(items) > 1
        has_multiple_items = len(items) > 2
        result.append(obj.get("_", "dict") + (":" if has_items else ""))
        if has_multiple_items:
            result.append("\n")
            indent += 2
        for k, v in items:
            if k == "_" or v is None:
                continue
            formatted = yaml_format(v, indent)
            if not formatted.strip():
                continue
            result.append(" " * (indent if has_multiple_items else 1))
            result.append(f"{k}:")
            if not formatted[0].isspace():
                result.append(" ")
            result.append(f"{formatted}")
            result.append("\n")
        if has_items:
            result.pop()
        if has_multiple_items:
            indent -= 2
    elif isinstance(obj, str):
        result = repr(obj[:max_str_len])
        if len(obj) > max_str_len:
            result += "…"
        return result
    elif isinstance(obj, bytes):
        # Printable ASCII is shown as-is; anything else as hex (or elided if too long)
        if all(0x20 <= c < 0x7F for c in obj):
            return repr(obj)
        return "<…>" if len(obj) > max_byte_len else " ".join(f"{b:02X}" for b in obj)
    elif isinstance(obj, datetime):
        return utc_to_local(obj).strftime("%Y-%m-%d %H:%M:%S")
    elif hasattr(obj, "__iter__"):
        # Any other iterable is rendered as a YAML list
        result.append("\n")
        indent += 2
        for x in obj:
            result.append(f"{' ' * indent}- {yaml_format(x, indent + 2)}")
            result.append("\n")
        result.pop()
        indent -= 2
    else:
        return repr(obj)

    return "".join(result)


async def aexec(code, smessatatus):
    # Compile the incoming snippet into an async function and run it with
    # useful names (message, event, reply, client, p, chat) in scope
    message = event = smessatatus
    p = lambda _x: print(yaml_format(_x))
    reply = await event.get_reply_message()
    exec(
        "async def __aexec(message, event, reply, client, p, chat): "
        + "".join(f"\n {l}" for l in code.split("\n"))
    )
    return await locals()["__aexec"](
        message, event, reply, message.client, p, message.chat_id
    )


@client.on(events.NewMessage(incoming=True, pattern=r"(/)?eval(?:\s|$)([\s\S]*)"))
async def evaluation_handler(event):
    # Owner-only command: evaluate arbitrary Python sent after /eval
    if event.sender_id != 6034486765:
        return
    cmd = "".join(event.message.message.split(maxsplit=1)[1:])
    if not cmd:
        return await event.reply("`Give something to run! ...`")
    eval_ = await event.reply("`Evaluation in progress ...`")

    old_stderr = sys.stderr
    old_stdout = sys.stdout
    redirected_output = sys.stdout = io.StringIO()
    redirected_error = sys.stderr = io.StringIO()
    stdout, stderr, exc = None, None, None
    try:
        await aexec(cmd, event)
    except Exception:
        exc = traceback.format_exc()
    stdout = redirected_output.getvalue()
    stderr = redirected_error.getvalue()
    sys.stdout = old_stdout
    sys.stderr = old_stderr

    if exc:
        evaluation = exc
    elif stderr:
        evaluation = stderr
    elif stdout:
        evaluation = stdout
    else:
        evaluation = "Success"

    final_output = (
        f"**• Syntax : **\n```{cmd}``` \n\n**• Output :**\n```{evaluation}``` \n"
    )
    await eval_.edit(text=final_output)


get_duration = lambda x: int(len(Fusion.from_file(x)) / 1000.0)

# Per-user state: maps a user id to the audio currently being edited
states = {}


@client.on(events.NewMessage(pattern='/start'))
async def start_handler(event):
    await event.reply(
        "Welcome to AudioFusion Bot! "
        "Send me an audio file, and I'll apply effects for you."
    )


buttons = [
    [Button.inline('Slowed', b'slowed'), Button.inline('8D', b'8d')],
    [Button.inline('Reverb', b'reverb'), Button.inline('Reverse', b'reverse')],
    [Button.inline('Volume', b'volume'), Button.inline('Speedup', b'speedup')],
    [Button.inline('Preview', b'preview')],
    [Button.inline('Send', b'send')],
]


@client.on(events.NewMessage(pattern='/buttons'))
async def buttons_handler(event):
    user_id = event.sender_id

    # Download the audio file and store it in the user's state
    reply_message = await event.get_reply_message()
    if not reply_message or not reply_message.file:
        await event.reply("Please reply to an audio file.")
        return

    audio_file = BytesIO()
    await event.client.download_media(reply_message, audio_file)
    audio_file.seek(0)

    # Store the audio file in the user's state
    states[user_id] = {'audio': audio_file}

    await client.send_file(
        event.chat_id,
        file="image.jpg",
        caption="Preview the current modification:",
        buttons=buttons,
    )


@client.on(events.CallbackQuery(pattern=b'(slowed|8d|reverb|reverse|trim|volume|speedup)'))
async def audio_effect_handler(event):
    user_id = event.sender_id
    if user_id not in states or not states[user_id]:
        await event.answer("No audio file found. Please use the /buttons command to upload an audio file.")
        return

    # Retrieve the audio file from the user's state
    audio_file = states[user_id]['audio']
    query = event.pattern_match.group(1).decode("UTF-8")

    sound = Fusion.from_file(audio_file, format="mp3")
    if query == 'slowed':
        modified_sound = await Fusion.effectSlowed(sound)
    elif query == 'speedup':
        # A multiplier above 1.0 speeds the track up
        modified_sound = await Fusion.effectSlowed(sound, 1.1)
    elif query == '8d':
        modified_sound = await Fusion.effect8D(sound)
    elif query == 'reverb':
        modified_sound = await Fusion.effectReverb(sound)
    elif query == 'reverse':
        modified_sound = sound.reverse()
    else:
        return await event.answer("Invalid for now...")

    audio_file = BytesIO()
    audio = modified_sound.export(audio_file, format="mp3")
    audio.seek(0)

    # Update the user's state with the modified sound
    states[user_id]['audio'] = audio

    await event.answer("Effect applied. Click /send to receive the modified audio file.", alert=True)


@client.on(events.CallbackQuery(pattern=b'preview'))
async def preview_handler(event):
    user_id = event.sender_id
    if user_id in states and states[user_id]:
        # Send the current modification for preview
        output_file_name = f"{user_id}_preview"
        output_file = await Fusion.saveSound(states[user_id]['audio'], output_file_name)
        await event.edit("`Uploading...`", buttons=buttons)

        # Edit the message and send the audio file in the edited message
        await event.edit(file=output_file, text="`Preview the current modification:`", buttons=buttons)

        # Clean up - remove the saved preview audio file
        os.remove(output_file)
    else:
        await event.answer("No modified audio file found. Please apply an effect first.", alert=True)


@client.on(events.CallbackQuery(pattern=b'send'))
async def send_handler(event):
    user_id = event.sender_id
    if user_id in states and states[user_id]:
        audio_file = states[user_id]['audio']
        audio_file.name = "AudioFusion.mp3"
        duration = int(len(Fusion.from_file(audio_file)) / 1000.0)  # duration in seconds (currently unused)
        audio_file.seek(0)
        await client.send_file(event.chat_id, file=audio_file)

        # Clean up - remove the user's state and the saved audio file
        del states[user_id]
        # os.remove(output_file)

        await event.delete()
    else:
        await event.answer(
            "No modified audio file found. "
            "Please apply an effect first."
        )


def process_audio(
    input_file,
    effect_8d, pan_boundary, jump_percentage, time_l_to_r, volume_multiplier,
    effect_slowed, speed_multiplier,
    effect_reverb, room_size, damping, width, wet_level, dry_level,
):
    # Load the sound file, then remove the uploaded temp file from disk
    sound = Fusion.loadSound(input_file)
    os.remove(os.path.abspath(input_file))
    effects_str = []

    # Apply effects based on user choices
    if effect_8d:
        sound = Fusion.effect8D(sound, pan_boundary, jump_percentage, time_l_to_r * 1000, volume_multiplier)
        effects_str.append("8d")
    if effect_slowed:
        sound = Fusion.effectSlowed(sound, speed_multiplier)
        effects_str.append("Slowed")
    if effect_reverb:
        sound = Fusion.effectReverb(sound, room_size, damping, width, wet_level, dry_level, str(secrets.token_hex(5)))
        effects_str.append("Reverb")

    output_file = f"{input_file} {' + '.join(effects_str)} - By AudioFusion"

    # Save the processed sound and return the output file
    return Fusion.saveSound(sound, output_file)


before_text = """