import gradio as gr
from transformers import pipeline

# argparse and run_llm.main are imported for the planned run_llm integration,
# but neither is used in this file yet.
import argparse
from run_llm import main

# Initialize the GPT-2 pipeline
pipe = pipeline("text-generation", model="gpt2")
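# The text-generation pipeline returns a list of dicts, e.g.:
#   pipe("Hello world", max_length=20)
#   -> [{'generated_text': 'Hello world, this is ...'}]
# which is why generate_text below indexes [0]['generated_text'].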
theme = gr.themes.Soft()
# Function that generates text based on instruction-based prompting
# def generate_text(input_instruction):
#     # Use the input instruction to generate text
#     generated_text = pipe(input_instruction, max_length=500)[0]['generated_text']
#     return generated_text
# generate_text takes the three interface inputs, in the same order as the
# `inputs` list below:
#   - the selected model type (first dropdown)
#   - the selected task (second dropdown)
#   - the sentence to analyze (textbox)
# The goal is to feed the models via run_llm.py and return three results, one
# per prompting strategy (strategies 1, 2 and 3), filling the three output
# boxes; see the sketch after this function. For now, all three outputs come
# from the GPT-2 placeholder pipeline.
def generate_text(model_type, task, sentence):
    generated_text = pipe(sentence, max_length=500)[0]['generated_text']
    # Return one value per output textbox (strategy 1, 2 and 3)
    return generated_text, generated_text, generated_text
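# --- Hypothetical wiring sketch (assumption, not the actual run_llm API) ---
# run_llm.main's signature is not visible here, so run_strategy below is a
# made-up stand-in for whatever per-strategy entry point run_llm exposes.
# The sketch only illustrates how one function could fill all three output
# boxes; generate_text above remains the function wired into the interface.
def run_strategy(strategy, model_type, task, sentence):
    # Placeholder: replace with a real call into run_llm for the given strategy.
    return f"[strategy {strategy}] {model_type} / {task}: {sentence}"

def generate_text_per_strategy(model_type, task, sentence):
    # One result per prompting strategy, matching the three output textboxes.
    return tuple(run_strategy(s, model_type, task, sentence) for s in (1, 2, 3))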
# Example inputs for testing; each example supplies all three interface inputs
# (model, task, sentence), using the default dropdown values for the first two
instruction_examples = [
    ["gpt3.5", "POS Tagging", "Describe the origin of the universe"],
    ["gpt3.5", "POS Tagging", "Explain the concept of artificial intelligence"],
    ["gpt3.5", "POS Tagging", "Describe the most common types of cancer"],
]
# Function that echoes the input text
# def echo_text(input_text):
#     return input_text
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Dropdown(
            ['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b',
             'llama-13b', 'llama-30b', 'alpaca'],
            label="Select Model", value='gpt3.5',
        ),
        gr.Dropdown(
            ['POS Tagging', 'Chunking', 'Parsing'],
            label="Select Task", value='POS Tagging',
        ),
        gr.Textbox(label="Enter Sentence", placeholder="Enter a sentence..."),
    ],
    outputs=[
        gr.Textbox(label="Strategy 1 Output", interactive=False),
        gr.Textbox(label="Strategy 2 Output", interactive=False),
        gr.Textbox(label="Strategy 3 Output", interactive=False),
    ],
    examples=instruction_examples,
    live=False,
    title="LLM Evaluator with Linguistic Scrutiny",
    theme=theme,
)

iface.launch()
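# To try this interface locally (assuming the dependencies are installed):
#   pip install gradio transformers
#   python app.py
# then open the local URL that launch() prints in a browser.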