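"""Gradio app that generates reading-comprehension questions about a pasted
reference text via the OpenAI Chat Completions API, streaming the output into
a chatbot widget. Requires OPENAI_API_KEY (in the environment or a local .env
file) and the logo file Iso_Logotipo_Ceibal.png next to the script."""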
import base64
import os

import gradio as gr
import openai
from dotenv import load_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Embed the Ceibal logo as base64 so it can be inlined in the page header.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

openai.api_key = os.environ["OPENAI_API_KEY"]

def clear_chat(message, chat_history):
    # Reset the input box and wipe the conversation history.
    return "", []

def add_new_message(message, questions_guide, chat_history):
    # Rebuild the message list for the Chat Completions API: the question
    # guide acts as the system prompt, followed by the previous turns and
    # the new user message.
    new_chat = [{"role": "system", "content": questions_guide}]
    for user, bot in chat_history:
        new_chat.append({"role": "user", "content": user})
        new_chat.append({"role": "assistant", "content": bot})
    new_chat.append({"role": "user", "content": message})
    return new_chat
    
def respond(message, questions_guide, chat_history):
    prompt = add_new_message(message, questions_guide, chat_history)
    response = openai.ChatCompletion.create(
        model="gpt-4-0125-preview",
        messages=prompt,
        temperature=0.5,
        max_tokens=1000,
        stream=True,
    )

    # Stream the completion into the chatbot: open a new turn for this
    # message, then grow its reply as content deltas arrive.
    partial_words = ""
    chat_history.append([message, ""])
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        # Role-only and final chunks carry no "content" key; skip them.
        if "content" in delta:
            partial_words += delta["content"]
            chat_history[-1] = [message, partial_words]
            yield "", chat_history


with gr.Blocks() as demo:
    gr.Markdown("""
    <center>
    <img src='data:image/png;base64,{}' width=200px>
    <h3>
    This space generates questions about a reference text.
    </h3>
    </center>
    """.format(encoded_image))
    with gr.Row():
        questions_guide = gr.Textbox(
            label="Enter the guide for generating the questions here:",
            value=(
                "Based on the text or novel you receive as input, generate "
                "questions aimed at school students between 8 and 12 years old. "
                "The questions should assess the students' reading comprehension. "
                "Generate 10 multiple-choice questions in increasing order of "
                "difficulty, each with three options, with the correct option "
                "marked with an X at the beginning."
            ),
        )
    with gr.Row():
        msg = gr.Textbox(label="Paste the reference text here:")
    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(label="Result:", height=150, show_copy_button=True)  # compact height to fit the notebook
        with gr.Column(scale=1):
            btn = gr.Button("Send")
            clear = gr.ClearButton(components=[msg, chatbot], value="Clear result.")

    btn.click(respond, inputs=[msg, questions_guide, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, questions_guide, chatbot], outputs=[msg, chatbot])  # press Enter to submit
    clear.click(clear_chat, inputs=[msg, chatbot], outputs=[msg, chatbot])
demo.queue()
demo.launch()
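
# Note: queue() above enables the request queue Gradio needs to stream the
# partial results yielded by respond(); launch() serves the app locally.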