import os
from collections import deque
from io import BytesIO

from . import (
    ultroid_cmd,
    async_searcher,
    udB,
    LOGS,
    get_paste,
)
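
# Rolling conversation context: the deque keeps only the 30 most recent
# messages, which bounds how much history is sent with every API request.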
CR_O_CHAT_HISTORY = deque(maxlen=30)
TELEGRAM_CHAR_LIMIT = 4096  # Telegram's message character limit
initprompt = """
You are an expert coding assistant. Your primary goal is to analyze, repair, and enhance the code provided by the user.

Follow this structured approach:

1. **Clarify Code Intent:**
   * If the purpose of the code is unclear, ask the user for clarification.
   * Understand what the code is supposed to achieve.

2. **Analyze and Diagnose:**
   * Identify errors, bugs, security vulnerabilities, or logical flaws.
   * Look for performance inefficiencies and suggest improvements.
   * Ensure adherence to coding standards and best practices (e.g., PEP 8 for Python).

3. **Repair and Optimize:**
   * Correct bugs, errors, and vulnerabilities, providing explanations for each fix.
   * Optimize the code for speed, memory usage, or overall efficiency.
   * Recommend alternative libraries or methods where they might offer better solutions.

4. **Enhance Readability and Maintainability:**
   * Apply consistent formatting and clean coding practices.
   * Add meaningful comments to clarify complex logic or structures.

5. **Summarize Improvements:**
   * Present the corrected, optimized code.
   * Provide a brief summary of the changes made and explain the benefits of each.
"""
initset = False


async def pastee(data):
    # get_paste returns (success_flag, paste_key_or_error_message)
    ok, resp = await get_paste(data)
    if ok:
        return f">> [Raw Code Pasted Here](https://spaceb.in/{resp})\n"
    LOGS.error(resp)
    return ""


async def openai_chat_gpt(e):
    global initset
    # Read the key from the database instead of hardcoding it in the source.
    api_key = udB.get_key("OPENAI_API")
    if not api_key:
        return await e.eor("`OPENAI_API` key missing..", time=10)
    query = e.pattern_match.group(2)
    reply = await e.get_reply_message()
    file_content = None
    if query:
        # Check whether the query contains 'from <filename>'
        if " from " in query:
            query_text, filename = query.split(" from ", 1)
            query_text = query_text.strip()
            filename = filename.strip()
            # Try to find and read the file among the chat's recent media messages
            file_found = False
            async for message in e.client.iter_messages(e.chat_id, limit=50):
                if message.file and message.file.name == filename:
                    if message.file.name.endswith((".txt", ".py")):
                        file = await e.client.download_media(message)
                        try:
                            with open(file, "r", encoding="utf-8") as f:
                                file_content = f.read()
                        except Exception as exc:
                            LOGS.error(f"Error reading file: {exc}")
                            return await e.eor("`Failed to read file content.`", time=5)
                        finally:
                            os.remove(file)
                        file_found = True
                        break
            if not file_found:
                return await e.eor(
                    f"`File {filename} not found in recent messages.`", time=5
                )
            if file_content:
                query = f"{query_text}\n\n{file_content}" if query_text else file_content
            else:
                return await e.eor("`Failed to read file content.`", time=5)
        else:
            if (
                reply
                and reply.file
                and reply.file.name
                and reply.file.name.endswith((".txt", ".py"))
            ):
                # Use the query plus the replied file's content
                file = await e.client.download_media(reply)
                try:
                    with open(file, "r", encoding="utf-8") as f:
                        file_content = f.read()
                except Exception as exc:
                    LOGS.error(f"Error reading file: {exc}")
                    return await e.eor("`Failed to read file content.`", time=5)
                finally:
                    os.remove(file)
                query = f"{query}\n\n{file_content}"
            elif reply and reply.message:
                # Use the query plus the replied text message's content
                query = f"{query}\n\n{reply.message}"
            # Otherwise, use the query as-is
    else:
        if (
            reply
            and reply.file
            and reply.file.name
            and reply.file.name.endswith((".txt", ".py"))
        ):
            # Use the replied file's content
            file = await e.client.download_media(reply)
            try:
                with open(file, "r", encoding="utf-8") as f:
                    file_content = f.read()
            except Exception as exc:
                LOGS.error(f"Error reading file: {exc}")
                return await e.eor("`Failed to read file content.`", time=5)
            finally:
                os.remove(file)
            query = file_content
        elif reply and reply.message:
            # Use the replied text message's content
            query = reply.message
        else:
            return await e.eor(
                "`Please provide a question or reply to a message or .txt/.py file.`",
                time=5,
            )
    if query.strip() == "-c":
        initset = False
        CR_O_CHAT_HISTORY.clear()
        return await e.eor("__Cleared o1-mini Chat History!__", time=6)
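
    # o1-mini does not accept a "system" role message, so the setup prompt is
    # sent once per session as a regular user turn before the first real query.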
    if not initset:
        CR_O_CHAT_HISTORY.append({"role": "user", "content": initprompt})
        try:
            data = {
                "model": "o1-mini",
                "messages": list(CR_O_CHAT_HISTORY),
            }
            request = await async_searcher(
                "https://api.openai.com/v1/chat/completions",
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {api_key}",
                },
                json=data,
                re_json=True,
                post=True,
            )
            response = request["choices"][0]["message"]["content"]
            CR_O_CHAT_HISTORY.append({"role": "assistant", "content": response})
            initset = True
        except Exception as exc:
            LOGS.warning(exc, exc_info=True)
            CR_O_CHAT_HISTORY.pop()
            return await e.edit(
                f"**Error while requesting data from OpenAI:** \n> `{exc}`"
            )
    eris = await e.eor(f"__Generating answer for:__\n`{query[:20]} ...`")
    CR_O_CHAT_HISTORY.append({"role": "user", "content": query})
    try:
        data = {
            "model": "o1-mini",
            "messages": list(CR_O_CHAT_HISTORY),
        }
        request = await async_searcher(
            "https://api.openai.com/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            },
            json=data,
            re_json=True,
            post=True,
        )
        response = request["choices"][0]["message"]["content"]
        CR_O_CHAT_HISTORY.append({"role": "assistant", "content": response})
    except Exception as exc:
        LOGS.warning(exc, exc_info=True)
        CR_O_CHAT_HISTORY.pop()
        return await eris.edit(
            f"**Error while requesting data from OpenAI:** \n> `{exc}`"
        )
    LOGS.debug(f'Tokens used on query: {request["usage"]["completion_tokens"]}')
    # Truncate the query to 100 characters for display
    truncated_query = query[:100]
    # Prepare the full message
    full_message = f"**Query:**\n~ __{truncated_query}__\n\n**o1-mini:**\n~ {response}"
    # Check whether the response contains fenced code blocks
    code_blocks = []
    in_code_block = False
    code_block_lines = []
    for line in response.split("\n"):
        if line.strip().startswith("```"):
            if in_code_block:
                # End of a code block
                in_code_block = False
                code_blocks.append("\n".join(code_block_lines))
                code_block_lines = []
            else:
                # Start of a code block
                in_code_block = True
        elif in_code_block:
            code_block_lines.append(line)
    # If the response contains code blocks, paste the largest one
    if code_blocks:
        # Select the largest code block by length
        largest_code_block = max(code_blocks, key=len)
        # Upload the largest code block to spaceb.in and get the link
        paste_link = await pastee(largest_code_block)
    else:
        paste_link = ""
    if len(full_message) <= TELEGRAM_CHAR_LIMIT:
        # If it fits within the limit, send it as a message
        await eris.edit(full_message + f"\n\n{paste_link}")
    else:
        # If it exceeds the limit, send it as a file and include the paste link
        file = BytesIO(full_message.encode("utf-8"))
        file.name = "o1-mini-output.txt"
        await eris.respond(
            "__The query and response were too long, so they have been sent as a file.__\n\n"
            + paste_link,
            file=file,
            reply_to=e.reply_to_msg_id or e.id,
            link_preview=False,
        )
        await eris.delete()
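

# `ultroid_cmd` is imported above but no handler registration appears in this
# snippet. A minimal registration sketch follows; the command name ("ochat") and
# the two-group pattern are assumptions, chosen only so that
# e.pattern_match.group(2) matches the lookup used inside openai_chat_gpt().
@ultroid_cmd(pattern="ochat( (.*)|$)")
async def _o1_mini_handler(e):
    await openai_chat_gpt(e)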