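"""Flask chat application.

Serves a small role-play chat UI in which the visitor speaks as "Rizon" and
the assistant replies as "Elon". Replies come from the braininc.net chat
endpoint; when a reply embeds a description between "~" markers, that
description is sent to a text-to-image endpoint and the resulting picture is
appended to the reply. Conversation history is kept in memory and reset on
each visit to the home page.
"""
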
from flask import Flask, render_template, request, send_from_directory
import requests

# Load local data files at start-up. These values are read into module-level
# variables but are not referenced elsewhere in this file.
with open('token.txt', 'r', encoding='utf-8') as file:
    imageG = file.read()

with open('libyan.json', 'r', encoding='utf-8') as file:
    ly = file.read()

with open('train.json', 'r', encoding='utf-8') as file:
    img = file.read()

app = Flask(__name__)

def get_assistant_response(user_input):
    # Build the chat request. The query frames a role-play scene: at night,
    # without any light, somebody calls at Rizon's door; Rizon opens it and
    # finds Elon there to talk with him.
    payload = {
        "mode": "chat",
        "chat_history": conversation_history,
        "data": {
            "query": f"At night, without light, somebody calls at Rizon's door. \n Rizon opens the door and finds Elon there to talk with him. To be more real, use a lot of emotions like uh, umm, uhh, ahh, etc. Don't take this example as a rule, just do it. \n \n Rizon : {user_input} \n Elon :",
            "loader": "PDFReader",
            "text": ""
        }
    }

    response = requests.post(url2, headers=headers, json=payload)
    data = response.json()

    # Extract the assistant's reply and convert newlines to HTML line breaks.
    response_text = data["data"]["response"]
    response_text = response_text.strip().replace('\n', '<br>')

    # A description wrapped between two "~" markers means the model wants an
    # image generated for it.
    if response_text.count("~") >= 2:
        # Keep the raw reply before the image markup is appended.
        conversation_history.append({"\n ALex-9": response_text})

        # Extract the prompt between the two "~" markers.
        prompt_start = response_text.index("~") + 1
        prompt_end = response_text.index("~", prompt_start)
        prompt = response_text[prompt_start:prompt_end]

        # Call the text-to-image API, embed the result, and strip the marker
        # text from the visible reply.
        image_url = generate_image(prompt)
        response_text += f"<br><br><img src='{image_url}'>"
        response_text = response_text.replace("~" + prompt + "~", "")

    return response_text

def generate_image(prompt):
    # Call the Brain stableai endpoint to render the prompt as an image.
    url = "https://api.braininc.net/be/lambda/function/stableai"
    headers = {
        "Authorization": "token 72ec00483379076f580eb8126f29da802a5140c3",
        "Content-Type": "application/json",
    }

    payload = {
        "json": True,
        "prompt": f"{prompt} Realistic Photo 4K",
        "public_topic": "/studios/516104/wsuid_new-edge-4_nodeid_editor-4/imageGen/1719250632789"
    }

    response = requests.post(url, headers=headers, json=payload)
    data = response.json()

    # The endpoint returns a CDN URL pointing at the rendered image.
    cdn_url = data["cdn_url"]
    return cdn_url
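# Illustrative usage (an assumption, not exercised elsewhere in this file: the
# stableai endpoint is reachable and its JSON response contains the "cdn_url"
# field accessed above):
#   image_url = generate_image("a lighthouse at sunset")
# No error handling is attempted if the request or the response parsing fails.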

# Chat endpoint and shared request headers. Note that the API token is
# hard-coded here and also appears in generate_image() above.
url2 = "https://api.braininc.net/be/vectordb/indexers/"
headers = {
    "Authorization": "token 72ec00483379076f580eb8126f29da802a5140c3",
    "Content-Type": "application/json;charset=UTF-8",
}

# In-memory conversation history, reset on every visit to the home page.
conversation_history = []

# Serve the chat page and start a fresh conversation.
@app.route('/')
def home():
    global conversation_history
    conversation_history = []
    return render_template('ai-chat-bot.html')

# Record the user's message, ask the assistant for a reply, record that too,
# and return the reply as an HTML fragment.
@app.route('/get_response', methods=['POST'])
def get_response():
    user_input = request.form['user_input']
    conversation_history.append({
        "role": "user",
        "content": f"Rizon : {user_input}",
        "additional_kwargs": {}
    })

    if user_input.lower() == "exit":
        return "exit"

    response_text = get_assistant_response(user_input)
    conversation_history.append({
        "role": "assistant",
        "content": f"Elon : {response_text}",
        "additional_kwargs": {}
    })
    return response_text

# Serve static assets from the 'desine' directory.
@app.route('/desine/<path:path>')
def send_static(path):
    return send_from_directory('desine', path)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)
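# Example request (sketch, assuming the server is running locally on port 7860):
#   curl -X POST -d "user_input=Hello" http://localhost:7860/get_response
# The route returns the assistant's reply as an HTML fragment, or the literal
# string "exit" when the user types "exit".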