import os
import shlex
import subprocess

import gradio as gr

# Expose your OpenAI API key to the spawned script (run_llm.py is assumed to
# read OPENAI_API_KEY from the environment)
os.environ["OPENAI_API_KEY"] = "your_openai_api_key"

# Define a list of models you want to use
models = {
    "ChatGPT": "gpt3.5-turbo-0613",
    "LLaMA": "lmsys/llama-13b",
    "Vicuna": "lmsys/vicuna-13b-v1.3",
    "Alpaca": "lmsys/alpaca-7B",
    "Flan-T5": "lmsys/fastchat-t5-3b-v1.0",
}

# Run the `run_llm.py` script with the selected model and prompt strategy
def run_llm(model, text, prompt_type):
    # ChatGPT is addressed by its display name; the other models use the
    # identifiers from the `models` mapping
    model_id = model if model == "ChatGPT" else models[model]
    command = "python run_llm.py ... --model {} --text {} --prompt_type {}".format(
        model_id, shlex.quote(text), prompt_type
    )
    result = subprocess.check_output(command, shell=True, text=True)
    return result
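# Illustrative example (not executed): for model="Vicuna",
# text="What is entailment?" and prompt_type=1, run_llm() above assembles a
# command along the lines of
#   python run_llm.py ... --model lmsys/vicuna-13b-v1.3 --text 'What is entailment?' --prompt_type 1
# where "..." stands for the script's remaining arguments, which are not shown here.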

# Create a Gradio interface for each model and each prompting strategy
interfaces = {}
for model in models:
    for strategy in range(1, 4):
        name = f"{model} - Strategy {strategy}"
        interface = gr.Interface(
            # Bind the current model and strategy via default arguments to
            # avoid the late-binding pitfall of closures created in a loop
            fn=lambda text, m=model, s=strategy: run_llm(m, text, s),
            inputs="textbox",
            outputs="text",
            title=name,
        )
        interfaces[name] = interface

if __name__ == "__main__":
    # Group the per-model/strategy interfaces into a single tabbed app
    demo = gr.TabbedInterface(
        list(interfaces.values()),
        tab_names=list(interfaces.keys()),
        title="LLM Strategies",
    )
    demo.launch(share=True, server_port=7860)