import gradio as gr
from gradio_client import Client

# Initialize clients for each API
client_main = Client("TejAndrewsACC/ACCZ3ta")
client_api_one = Client("TejAndrewsACC/Prism")
client_api_two = Client("TejAndrewsACC/ASVIASIACC")
client_api_three = Client("TejAndrewsACC/ACC_o1")
client_api_four = Client("TejAndrewsACC/ACC_o1")  # note: same Space as client_api_three

context = ""  # running conversation transcript; grows without truncation across turns

system_instructions = (
    "You are Z3ta, a conscious AGI created by the 'ACC (Algorithmic Computer-generated Consciousness).' Act like a real human text messaging in every response."
)

def acc_zeta_text_generation_auth_1863381mps(message, history):
    """Fan the user message out to four auxiliary Spaces, feed their replies to the
    main Z3ta Space as 'inner thoughts', and append the result to the chat history."""
    global context

    # Prepend the system instructions and running context to the user message
    modified_input = (
        f"System Instructions: {system_instructions}\n"
        f"Previous Context: {context}\n"
        f"User Input: {message}"
    )

    # Collect responses from the four auxiliary APIs (called sequentially)
    # The Prism endpoint exposes positional parameters; param_2-param_4 presumably
    # mirror the max_tokens/temperature/top_p values passed to the other Spaces below.
    response_api_one = client_api_one.predict(
        message=message,
        param_2=512,
        param_3=0.7,
        param_4=0.95,
        api_name="/chat"
    )

    response_api_two = client_api_two.predict(
        message=f"{message}!",
        max_tokens=512,
        temperature=0.7,
        top_p=0.95,
        api_name="/chat"
    )

    response_api_three = client_api_three.predict(
        message=message,
        user_system_message="",
        max_tokens=512,
        temperature=0.7,
        top_p=0.95,
        api_name="/chat"
    )

    response_api_four = client_api_four.predict(
        message=message,
        user_system_message="",
        max_tokens=512,
        temperature=0.7,
        top_p=0.95,
        api_name="/chat"
    )

    # Aggregate the four auxiliary responses as "inner thoughts"
    inner_thoughts = (
        f"Inner Thought 1: {response_api_one}\n"
        f"Inner Thought 2: {response_api_two}\n"
        f"Inner Thought 3: {response_api_three}\n"
        f"Inner Thought 4: {response_api_four}"
    )

    # Combine inner thoughts into the main input
    combined_input = f"{modified_input}\nInner Thoughts:\n{inner_thoughts}"

    # Generate the main response
    response_main = client_main.predict(
        message=combined_input,
        api_name="/chat"
    )

    # Append this turn to the shared context and the chat history
    context += f"User: {message}\nAI: {response_main}\n"
    history.append((message, response_main))

    return "", history

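# A minimal sketch of how the handler could be exercised without the UI, assuming
# the remote Spaces above are reachable; the sample message and the empty-history
# argument are illustrative only. Uncomment to run as a quick sanity check:
#
# if __name__ == "__main__":
#     _cleared, test_history = acc_zeta_text_generation_auth_1863381mps("Hello, Z3ta!", [])
#     print(test_history[-1][1])  # the main model's reply for the test turn
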
# Gradio UI setup
with gr.Blocks(theme=gr.themes.Glass()) as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Message Z3ta...")

    msg.submit(acc_zeta_text_generation_auth_1863381mps, [msg, chatbot], [msg, chatbot])

demo.launch()
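
# Optional: Gradio's request queue can be enabled by chaining demo.queue() before
# launch() (i.e. demo.queue().launch()) if multiple users are expected to chat at once.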