File size: 4,381 Bytes
732fabe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4c259d8
 
732fabe
 
 
 
 
 
 
 
 
 
 
b46c083
22aff9e
 
 
 
1fc4fa4
 
97df647
 
 
 
 
 
 
 
 
 
b46c083
0d14b43
 
 
c81be17
 
 
 
b46c083
 
c81be17
b46c083
 
 
 
 
0d14b43
b46c083
 
0d14b43
 
b46c083
de7e62a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b46c083
de7e62a
 
 
 
d5c0a21
97df647
d5c0a21
 
 
97df647
bd49c1e
 
 
 
 
 
b46c083
d5c0a21
 
 
97df647
d5c0a21
 
 
 
 
 
 
 
 
b46c083
97df647
 
d5c0a21
b46c083
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2024 (c) Randy W @xtdevs, @xtsea
#
# from : https://github.com/TeamKillerX
# Channel : @RendyProjects
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

import asyncio
import os
from pyrogram import *
from pyrogram import enums
from pyrogram import Client, filters
from pyrogram.types import *
from pyrogram.errors import *
from config import *

from database import db
from logger import LOGS
import datetime

from huggingface_hub import InferenceClient
from chatbot.plugins.user_database import users_collection
from chatbot.plugins.keyboards import get_language_keyboard
from chatbot.plugins.languages import LANGUAGES

from . import force_sub

# System prompt injected as the "system" message of every chat completion
# (see process_stream below).
# NOTE(review): because this is an f-string evaluated at import time, the
# embedded datetime.datetime.now() timestamp is frozen when the module loads,
# not refreshed per request — confirm whether that is intended.
SYSTEM_PROMPT = f"""
Your name is Randy Dev. A kind and friendly AI assistant that answers in a short and concise answer.
Give short step-by-step reasoning if required.

python language powered by @xtdevs on telegram support and language models Meta AI

{datetime.datetime.now()}
"""

async def process_stream(message, prompt):
    """Send *prompt* to the Hugging Face chat-completion API and return the reply.

    Streams the completion from ``mistralai/Mixtral-8x7B-Instruct-v0.1`` and
    concatenates the chunks into a single string.

    Args:
        message: The incoming pyrogram message (kept for interface
            compatibility; currently unused inside this function).
        prompt: The user's question, sent as the "user" message.

    Returns:
        The accumulated completion text (may be empty if the model
        produced no content).
    """
    # NOTE(review): InferenceClient streaming is synchronous, so this loop
    # blocks the event loop while chunks arrive — consider asyncio.to_thread.
    client_hf = InferenceClient(api_key=HF_KEY)
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": prompt},
    ]
    stream = client_hf.chat.completions.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=messages,
        max_tokens=500,
        stream=True,
    )
    accumulated_text = ""
    for chunk in stream:
        LOGS.info(chunk)
        # Fix: delta.content is None on role-only / final stream chunks;
        # concatenating it raised "can only concatenate str" before.
        new_content = chunk.choices[0].delta.content
        if new_content:
            accumulated_text += new_content
    return accumulated_text

@Client.on_message(
    ~filters.scheduled
    & filters.command(["start"])
    & ~filters.forwarded
)
async def startbot(client: Client, message: Message):
    """Greet the user on /start with a welcome text and two link buttons."""
    keyboard = InlineKeyboardMarkup(
        [
            [
                InlineKeyboardButton(text="Developer", url="https://t.me/xtdevs"),
                InlineKeyboardButton(text="Channel", url="https://t.me/RendyProjects"),
            ]
        ]
    )
    await message.reply_text(
        text="Woohoo! Welcome! I'm excited to get started as a Meta AI bot!\n\n• Command /ask hello",
        disable_web_page_preview=True,
        reply_markup=keyboard,
    )

@Client.on_message(
    filters.private
    & filters.command(["ask"])
    & ~filters.forwarded
)
@force_sub
async def askcmd(client: Client, message: Message):
    """Handle /ask: forward the prompt to the model and reply with its answer.

    The prompt is taken from the command argument, or from the replied-to
    message's text. Long answers (> 4096 chars, Telegram's message limit)
    are sent as a document instead of a text message.
    """
    if len(message.command) > 1:
        prompt = message.text.split(maxsplit=1)[1]
    elif message.reply_to_message:
        prompt = message.reply_to_message.text
    else:
        return await message.reply_text("Give ask from Meta AI")
    # Fix: a reply to a media-only message has text=None; previously that
    # None was forwarded to the model as the prompt.
    if not prompt:
        return await message.reply_text("Give ask from Meta AI")
    await client.send_chat_action(message.chat.id, enums.ChatAction.TYPING)
    await asyncio.sleep(1.5)
    try:
        output = await process_stream(message, prompt)
        if len(output) > 4096:
            # Fix: build the document in memory instead of a shared on-disk
            # "chat.txt", which raced between concurrent users of the bot.
            from io import BytesIO

            doc = BytesIO(output.encode("utf-8"))
            doc.name = "chat.txt"  # pyrogram uses .name as the filename
            await message.reply_document(
                document=doc,
                disable_notification=True,
            )
        else:
            await message.reply_text(output, disable_web_page_preview=True)
        await client.send_chat_action(message.chat.id, enums.ChatAction.CANCEL)
        return
    except Exception as e:
        # Surface the failure to the user instead of failing silently.
        return await message.reply_text(f"Error: {e}")