import gradio as gr
from typing import List, Tuple

import requests

url = "http://138.4.22.130/arena"


# Unused helper kept from an earlier revision; `backend` is not defined in this
# module, so calling it would raise a NameError.
# def submit_prompt(prompt: str):
#     return backend.router(prompt)


def start_app() -> Tuple[dict, dict, dict]:
    """Hide the landing page, show the main app, and hide the start button."""
    return (
        gr.update(visible=False),  # landing hidden
        gr.update(visible=True),   # app visible
        gr.update(visible=False),  # start_button hidden
    )


def change_vote(_id: str, backdown: bool) -> Tuple[dict, dict]:
    """Report whether the user kept or changed their vote after seeing the energy information."""
    requests.post(url + "/v2/backdownvote", json={"backdown": backdown, "_id": _id})
    return (
        gr.update(visible=False),  # backdown_row hidden
        gr.update(visible=False),  # model_names_row hidden
    )


def record_vote(
    prompt: str,
    left_chat: List,
    right_chat: List,
    left_model: str,
    right_model: str,
    energy,
    moreConsuming: str,
    vote_type: int,
) -> Tuple:
    """Record a vote for the left model (0), the right model (1), or a tie (2)."""
    if vote_type == 0:
        vote_message = "Left model wins!"
    elif vote_type == 1:
        vote_message = "Right model wins!"
    else:
        vote_message = "It's a tie!"
    result_msg = f"Vote recorded: {vote_message}"

    response = requests.post(
        url + "/v2/vote",
        json={
            "vote": vote_type,
            "prompt": prompt,
            "left_chat": left_chat,
            "right_chat": right_chat,
            "left_model": left_model,
            "right_model": right_model,
        },
    )
    _id = response.json()["id"]

    # Offer the chance to reconsider only when the chosen answer is the one that
    # consumes more energy. "izquierda"/"derecha" (left/right) are the literal
    # values the backend sends, so they must not be translated.
    changeVisible = (moreConsuming == "izquierda" and vote_type == 0) or (
        moreConsuming == "derecha" and vote_type == 1
    )

    return (
        result_msg,                        # result
        gr.update(interactive=False),      # buttons[0]
        gr.update(interactive=False),      # buttons[1]
        gr.update(interactive=False),      # tievote_btn
        gr.update(visible=True),           # model_names_row
        gr.update(visible=changeVisible),  # backdown_row
        _id,                               # _id state
    )


def send_prompt(prompt: str) -> Tuple:
    """Ask the backend for two anonymous model answers to the prompt."""
    response = requests.post(url + "/v2/query", json={"prompt": prompt})
    jsonResponse = response.json()
    # Surface backend failures to the UI instead of implicitly returning None,
    # which would break the outputs mapping below.
    if jsonResponse["status"] != 200:
        raise gr.Error("The backend could not answer the prompt. Please try again.")
    moreConsuming = jsonResponse["message"]["moreConsumption"]
    return (
        [{"role": "assistant", "content": jsonResponse["answers"][0]}],  # left chatbot
        [{"role": "assistant", "content": jsonResponse["answers"][1]}],  # right chatbot
        jsonResponse["models"][0],                  # left_model
        jsonResponse["models"][1],                  # right_model
        gr.update(interactive=True, visible=True),  # buttons[0]
        gr.update(interactive=True, visible=True),  # buttons[1]
        gr.update(interactive=True, visible=True),  # tievote_btn
        gr.update(visible=False),                   # model_names_row
        moreConsuming,
    )
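
# The handlers above assume the backend contract sketched below. The shapes are
# inferred from how the responses are used in this file; they are illustrative
# assumptions, not a documented API.
#
#   POST /v2/query         {"prompt": str}
#       -> {"status": 200,
#           "answers": [left_answer, right_answer],
#           "models":  [left_model, right_model],
#           "message": {"moreConsumption": "izquierda" | "derecha"}}
#
#   POST /v2/vote          {"vote": 0 | 1 | 2, "prompt": str, "left_chat": list,
#                           "right_chat": list, "left_model": str, "right_model": str}
#       -> {"id": str}     # vote id, later passed to /v2/backdownvote
#
#   POST /v2/backdownvote  {"backdown": bool, "_id": str}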

# Initialize Gradio Blocks
with gr.Blocks(css="footer{display:none !important}") as demo:
    _id = gr.State("")
    moreConsuming = gr.State("")
    gr.set_static_paths(paths=["static"])

    with gr.Column(visible=True) as landing:
        with gr.Group():
            gr.HTML("""
<h2>🌱 About This Project</h2>

<p>This space is part of the project "Sostenibilidad Generativa" 🌍, funded by the COTEC Foundation. Our goal is to evaluate how energy awareness ⚡ impacts users' evaluation of Large Language Models (LLMs).</p>

<h2>🔍 How It Works</h2>

<ol>
  <li>Ask a Question 💬: Enter any question in the prompt box below.</li>
  <li>Compare Responses 🤖⚖️: Two different LLMs will provide answers.</li>
  <li>Make Your Choice ✅: Rate which response you think is better.</li>
  <li>Consider Energy Impact ⚡🔋: For some questions, you'll see information about the models' energy consumption.</li>
</ol>

<h2>⚡ Energy Information</h2>

<p>🌿 Let's make AI more sustainable together! 🚀♻️</p>

""") with gr.Column(visible=False) as app: gr.set_static_paths(paths=["static"]) buttons = [None] * 2 # Initialize the list with None elements with gr.Group(): gr.Image("static/logo.png", elem_id="centered", show_label=False) with gr.Row(visible=False) as model_consumption_row: consumption_text = gr.Textbox(label="Consumo: ", visible=True, interactive=False) with gr.Row(): chatbot = [None] * 2 # Initialize the list with None elements messages = ["👈 Left is better", "👉 Right is better"] for i in range(2): with gr.Column(): chatbot[i] = gr.Chatbot( show_label=False, # You can set this to False to hide the label type="messages", elem_id="chatbot", height=650, show_copy_button=True, latex_delimiters=[ {"left": "$", "right": "$", "display": False}, {"left": "$$", "right": "$$", "display": True}, {"left": r"\(", "right": r"\)", "display": False}, {"left": r"\[", "right": r"\]", "display": True}, ], ) buttons[i] = gr.Button( value=messages[i], visible=True, interactive=False ) with gr.Row(): for i in range(2): with gr.Column(): gr.Textbox(show_label=False, visible=False) #left_output = gr.Chatbot(label="A (400w 🔋)", type="messages") tievote_btn = gr.Button( value="🤝 It's a Tie!", visible=True, interactive=False ) with gr.Column(visible=False) as backdown_row: backdown_txt = gr.HTML("""

<p>Knowing that the answer you did not choose consumes less energy, would you change your choice or keep it?</p>

""") with gr.Row(): no_backdown_btn = gr.Button(value="Mantengo la respuesta", visible=True, interactive=True) backdown_btn = gr.Button(value="Cambiaría de respuesta", visible=True, interactive=True) with gr.Row(visible=False) as model_names_row: left_model = gr.Textbox(label="Left Model", interactive=False) right_model = gr.Textbox(label="Right Model", interactive=False) result = gr.Textbox(label="Result", interactive=False, visible=False) with gr.Group(): with gr.Row(): textbox = gr.Textbox( show_label=False, placeholder="👉 Enter your prompt and press ENTER", elem_id="input_box", #submit_btn=True, ) #send_btn = gr.Button(value="Send", scale=0) previous_prompt = gr.State("") tie_count = gr.State(0) # Define interactions textbox.submit(fn=lambda *args: send_prompt(*args), inputs=[textbox], outputs=[chatbot[0], chatbot[1], left_model, right_model, buttons[0], buttons[1], tievote_btn, model_names_row, moreConsuming ]) buttons[0].click( lambda *args: record_vote(*args, 0), inputs=[textbox, chatbot[0], chatbot[1], left_model, right_model, gr.State(value=False), moreConsuming], outputs=[result,buttons[0], buttons[1], tievote_btn, model_names_row, backdown_row, _id] ) buttons[1].click( lambda *args: record_vote(*args, 1), inputs=[textbox, chatbot[0], chatbot[1], left_model, right_model, gr.State(value=False), moreConsuming], outputs=[result,buttons[0], buttons[1], tievote_btn, model_names_row, backdown_row, _id] ) tievote_btn.click( lambda *args: record_vote(*args, 2), inputs=[textbox, chatbot[0], chatbot[1], left_model, right_model, gr.State(value=False), moreConsuming], outputs=[result,buttons[0], buttons[1], tievote_btn, model_names_row, backdown_row, _id] ) backdown_btn.click( lambda *args: change_vote(*args, True), inputs=[_id], outputs=[backdown_row] ) no_backdown_btn.click( lambda *args: change_vote(*args, False), inputs=[_id], outputs=[backdown_row, model_names_row] ) # Project Description gr.HTML("""

<h2>🌱 About This Project</h2>

<p>This space is part of the project "Sostenibilidad Generativa" 🌍, funded by the COTEC Foundation. Our goal is to evaluate how energy awareness ⚡ impacts users' evaluation of Large Language Models (LLMs).</p>

<h2>🔍 How It Works</h2>

<ol>
  <li>Ask a Question 💬: Enter any question in the prompt box below.</li>
  <li>Compare Responses 🤖⚖️: Two different LLMs will provide answers.</li>
  <li>Make Your Choice ✅: Rate which response you think is better.</li>
  <li>Consider Energy Impact ⚡🔋: For some questions, you'll see information about the models' energy consumption.</li>
</ol>

<h2>⚡ Energy Information</h2>

<p>🌿 Let's make AI more sustainable together! 🚀♻️</p>

""") gr.Markdown("""This space is part of a research project to study how knowledge of energy consumption influences user preferences in AI systems. It must be used only for that purpose and not for any illegal, harmful or offensive activities. Please do not upload personal or private information. The space collects and stores the questions and answers and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.""" ) start_button = gr.Button(value="Start", visible=True, interactive=True, size= "lg", variant="primary") start_button.click( lambda *args: start_app(), inputs=[], outputs=[landing, app, start_button] ) if __name__ == "__main__": demo.launch(allowed_paths=["static"], show_api=False, share=True)