import gradio as gr
import os
import openai
from run_llm import model_mapping, fastchat  # Model name mapping and inference helper from run_llm.py

# Read the OpenAI API key from the environment instead of hard-coding it in the source
openai.api_key = os.environ.get("OPENAI_API_KEY")

def generate_text(input_text, model, prompt_type):
    # Use the fastchat function from run_llm.py
    outputs = fastchat(input_text, model, prompt_type)
    return outputs

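# Gradio UI: a text box for the input, a dropdown listing the models defined in
# model_mapping, and a radio button choosing which prompt type (1 or 2) to pass to fastchat.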
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Dropdown(
            list(model_mapping.keys()),
            label="Model"
        ),
        gr.Radio([1, 2], label="Prompt Type"),
    ],
    outputs=gr.Textbox(label="Generated Text")
)

iface.launch()
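
# A minimal stand-in for run_llm.py, useful when smoke-testing this interface without
# the real backend. The bodies below are assumptions inferred from how fastchat and
# model_mapping are used above, not the actual implementation in run_llm.py:
#
#   model_mapping = {"vicuna-7b": "lmsys/vicuna-7b-v1.5"}   # display name -> model id (example values)
#
#   def fastchat(input_text, model, prompt_type):
#       # Echo the inputs so the UI round-trip can be verified end to end.
#       return f"[{model} | prompt type {prompt_type}] {input_text}"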