File size: 4,117 Bytes
2459c1f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ae27e6c
2459c1f
 
527456d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2459c1f
 
 
 
 
 
 
 
 
ae27e6c
2459c1f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4add049
 
2459c1f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
from transformers import pipeline, Conversation
import gradio as gr

from dotenv import load_dotenv

# Load environment variables from the .env file (for local development);
# provides OPENAI_API_KEY, read below.
load_dotenv()
import base64

# Read the Ceibal logo and base64-encode it so it can be inlined as a
# data: URI inside the Gradio markdown header.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

# NOTE(review): dead experiment with a local HuggingFace DialoGPT pipeline,
# kept commented out by the original author. The `transformers` import above
# exists only to support this; safe to remove both if the experiment is dropped.
# chatbot = pipeline(model="microsoft/DialoGPT-medium")
# conversation = Conversation("Hi")
# response = chatbot(conversation)
# #conversation.mark_processed()
# #conversation.append_response(response)
# conversation.add_user_input("How old are you?")

# conversation2 = chatbot(conversation)
# print(conversation2)

# def respond(text, conversation):
#     chatbot = pipeline(model="microsoft/DialoGPT-medium")

#     if len(conversation)==0:
#         conversation = Conversation(text)
#         conversation = chatbot(conversation)
#         print(conversation.iter_texts())
#         # test = []
#         # for user,text in conversation.iter_texts():
            

#         return text, conversation.iter_texts()
#     else:
#         conversation.add_user_input(text)
#         conversation = chatbot(conversation)
#         return text, conversation.iter_texts()

import os
import openai

# Legacy openai<1.0 style: set the API key on the module.
openai.api_key = os.environ['OPENAI_API_KEY']

def clear_chat(message, chat_history):
    """Reset the UI: blank out the textbox and empty the chat history.

    Both arguments are ignored; they exist only because Gradio passes the
    current component values to every event handler.
    """
    return "", []

def add_new_message(message, chat_history):
    """Build an OpenAI chat-completion message list from Gradio history.

    `chat_history` is a sequence of (user_text, bot_text) pairs; each pair
    expands into a "user" and an "assistant" message, and the new `message`
    is appended as the final "user" turn.
    """
    messages = [
        {"role": role, "content": text}
        for user_text, bot_text in chat_history
        for role, text in (("user", user_text), ("assistant", bot_text))
    ]
    messages.append({"role": "user", "content": message})
    return messages
    
def respond(message, chat_history):
    """Stream a GPT-4 reply to `message`, yielding UI updates to Gradio.

    Generator handler: each yield is ("", chat_history) — the empty string
    clears the input textbox while the Chatbot component re-renders the
    history with the partially streamed answer.

    Fixes over the original: streamed deltas are read with .get("content"),
    because the first chunk (role-only) and the final chunk (empty delta)
    carry no `content` field — the old code dereferenced
    `chunk_message.content` unguarded and crashed on an empty history, and
    its `len(chunk_message)` branching could append duplicate rows.
    """
    prompt = add_new_message(message, chat_history)

    # Legacy openai<1.0 streaming API; yields incremental delta chunks.
    response = openai.ChatCompletion.create(
        model="gpt-4-1106-preview",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,
    )

    # Append one (user, bot) row up front and grow the bot side in place
    # as tokens arrive, instead of appending/replacing rows per chunk.
    chat_history.append([message, ""])
    partial_words = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        content = delta.get("content")
        if content:
            partial_words += content
            chat_history[-1][1] = partial_words
        yield "", chat_history

# Gradio UI: header with the inlined logo, a Chatbot display, a textbox
# plus send/clear buttons, and the event wiring to the handlers above.
with gr.Blocks() as demo:
    gr.Markdown("""
    <center>
    <h1>
    Uso de AI para un chatbot.
    </h1>
    <img src='data:image/jpg;base64,{}' width=200px>
    <h3>
    En este espacio podrás interactuar con ChatGPT y su modelo GPT4!
    </h3>
    </center>
    """.format(encoded_image))
    with gr.Row():
        chatbot = gr.Chatbot()  # conversation display; sized to fit the notebook layout
    with gr.Row():
        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(label="Texto de entrada")
            with gr.Column(scale=1):
                btn = gr.Button("Enviar")
                clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")

    # `respond` is a generator, so both triggers stream partial answers.
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])  # Enter key also submits
    clear.click(clear_chat,inputs=[msg, chatbot], outputs=[msg, chatbot])


# Queue requests (up to 4 concurrent workers) so streaming handlers work,
# then start the server.
demo.queue(concurrency_count=4)
demo.launch()