import gradio as gr
import os
from openai import OpenAI
from generate_prompt import construct_generic_prompt, recommend_config
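
# Gradio demo for assembling multilingual prompts: pick a task, source language,
# and dataset, choose which prompt components (prefix, context, examples, output)
# stay in English versus the source language, then generate a prompt or request a
# recommended configuration via the generate_prompt helpers.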
# Define available tasks and their corresponding datasets
QA = "QA"
SUMMARIZATION = "Summarization"
NLI = "NLI"
NER = "NER"
tasks_datasets = {
    QA: ["XQuad", "Indicqa"],
    SUMMARIZATION: ["XLSum", "HeSum"],
    NLI: ["XNLI"],
    NER: ["MasakaNER", "WikiANN"]
}
# List of all languages
languages = [
    "English", "Spanish", "French", "German", "Chinese", "Japanese", "Korean", "Italian",
    "Portuguese", "Russian", "Arabic", "Hindi", "Bengali", "Turkish", "Vietnamese", "Polish",
    "Dutch", "Indonesian", "Malay", "Thai", "Greek", "Swedish", "Hungarian", "Finnish",
    "Danish", "Norwegian", "Hebrew", "Czech", "Slovak", "Bulgarian", "Romanian", "Serbian",
    "Croatian", "Ukrainian", "Lithuanian", "Latvian", "Estonian", "Filipino", "Icelandic",
    "Irish", "Welsh", "Maltese", "Swahili", "Zulu", "Afrikaans"
]
def get_datasets(task):
    return tasks_datasets.get(task, [])
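
# Build the Gradio interface: instruction/model inputs on the left, task and
# language controls on the right, prompt-configuration and few-shot accordions,
# task-specific text fields, and the output boxes.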
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("## Multilingual Prompt Generator")
    with gr.Row():
        with gr.Column(scale=2):
            instruction = gr.Textbox(label="Instruction")
            openai_key = gr.Textbox(label="OpenAI API key", type="password")
            model = gr.Textbox(label="Model", placeholder="Enter model name (e.g., gpt-4-vision-preview)")
            model_type = gr.Dropdown(label="Model Type", choices=["Multilingual", "English"], value='English')
            config_recommendation = gr.Button("Recommend Configuration")
        with gr.Column():
            task = gr.Dropdown(label="Task", choices=list(tasks_datasets.keys()), value=QA)
            language = gr.Dropdown(label="Source Language", choices=languages, value="English")
            zero_shot = gr.Checkbox(label="Zero-shot", value=False)
with gr.Accordion("Prompt Configuration Selection", open=False):
prefix_selection = gr.Dropdown(["English", "Source"], label="prefix", value='English')
context_selection = gr.Dropdown(["English", "Source"], label="context", value='English')
examples_selection = gr.Dropdown(["English", "Source"], label="examples" , value='English')
output_selection = gr.Dropdown(["English", "Source"], label="output", value='English')
with gr.Accordion("Few Shot - Select Type of Examples ", open=False, visible=True) as few_shot:
dataset = gr.Dropdown(label="Dataset", choices=tasks_datasets[QA], value="XlSum")
num_examples = gr.Slider(label="Number of examples in context", minimum=1, maximum=10, step=1, value=3)
    with gr.Row():
        question = gr.Textbox(label="Question", visible=True)
        context = gr.Textbox(label="Context", visible=True)
        text = gr.Textbox(label="Text", visible=False)
        sentence = gr.Textbox(label="Sentence", visible=False)
        hypothesis = gr.Textbox(label="Hypothesis", visible=False)
        premise = gr.Textbox(label="Premise", visible=False)
    with gr.Row():
        config_prompt = gr.Textbox(label="Recommended Configuration", interactive=False,
                                   placeholder="Recommended configuration for this scenario")
        generate_button = gr.Button("Generate Prompt")
    with gr.Row():
        prompt = gr.Textbox(label="Generated Prompt", interactive=False, placeholder="Generated prompt will appear here.")
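
    # Callbacks that keep the dataset list, the visible task-specific fields,
    # and the few-shot controls in sync with the current selections.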
    def update_datasets(selected_task):
        return gr.Dropdown(choices=get_datasets(selected_task))

    def toggle_task_inputs(selected_task):
        # Visibility updates in the order: question, context, text, sentence, hypothesis, premise.
        if selected_task == QA:
            return (
                gr.update(visible=True), gr.update(visible=True), gr.update(visible=False),
                gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
            )
        elif selected_task == SUMMARIZATION:
            return (
                gr.update(visible=False), gr.update(visible=False), gr.update(visible=True),
                gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
            )
        elif selected_task == NER:
            return (
                gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
                gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
            )
        else:  # NLI
            return (
                gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
                gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
            )

    def toggle_num_examples(zero_shot_value):
        # If zero-shot is selected, hide the few-shot controls.
        return gr.update(visible=not zero_shot_value)
    def update_language_selection(language):
        # Offer "English" plus the selected source language in every config dropdown.
        choices = list({'English', language})
        return (
            gr.update(choices=choices),
            gr.update(choices=choices),
            gr.update(choices=choices),
            gr.update(choices=choices),
        )
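
    # Build the final prompt: collect the task-specific fields and the per-component
    # language choices, then delegate to construct_generic_prompt.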
    def generatePrompt(instruction, num_examples, zero_shot,
                       task, selected_language, dataset, prefix_selection, context_selection, examples_selection, output_selection,
                       text, question, context, sentence, hypothesis, premise):
        # Map the dropdown selections onto the config keys expected by construct_generic_prompt.
        config = {
            'prefix': prefix_selection.lower(),
            'input': context_selection.lower(),
            'context': examples_selection.lower(),
            'output': output_selection.lower(),
        }
        if task == QA:
            text_example = {
                'context': context,
                'question': question,
            }
        elif task == SUMMARIZATION:
            text_example = {
                'text': text,
            }
        elif task == NER:
            text_example = {
                'tokens': sentence,
            }
        else:  # NLI
            text_example = {
                'hypothesis': hypothesis,
                'premise': premise
            }
        print(text_example)
        prompt = construct_generic_prompt(task, instruction, text_example, zero_shot, num_examples, selected_language, dataset, config)
        return prompt
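
    # Sends the message (plus an image URL and the few-shot configuration) to the
    # OpenAI chat completions API. Note: this helper is not bound to any UI event
    # below, so it is currently unused by the demo.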
    def respond(message, openai_key, url, chat_history, model, config_input, config_prefix, config_context,
                config_output, task, dataset, language, num_examples, zero_shot):
        os.environ["OPENAI_API_KEY"] = openai_key
        client = OpenAI()
        config = {
            "input": config_input,
            "prefix": config_prefix,
            "context": config_context.split(', '),
            "output": config_output,
            "language": language,
            "num_examples": num_examples,
            "zero_shot": zero_shot
        }
        # The chat completions API only accepts "text" and "image_url" content parts
        # (image_url must be an object with a "url" field), so the task, dataset, and
        # config details are folded into the text part.
        response = client.chat.completions.create(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text",
                         "text": f"{message}\n\nTask: {task}\nDataset: {dataset}\nConfig: {config}"},
                        {"type": "image_url", "image_url": {"url": url}},
                    ],
                },
            ],
            max_tokens=1000,
        )
        out = response.choices[0].message.content
        chat_history.append((message, out))
        return "", chat_history
    # Bind functions to dropdown changes and button click
    # task.change(fn=update_datasets, outputs=dataset)
    language.change(fn=update_language_selection, inputs=language, outputs=[prefix_selection, context_selection, examples_selection, output_selection])
    zero_shot.change(fn=toggle_num_examples, inputs=zero_shot, outputs=few_shot)
    zero_shot.change(fn=toggle_num_examples, inputs=zero_shot, outputs=num_examples)
    task.change(fn=update_datasets, inputs=task, outputs=dataset)
    task.change(fn=toggle_task_inputs, inputs=task, outputs=[
        question, context, text, sentence, hypothesis, premise,
    ])
    generate_button.click(
        generatePrompt,
        inputs=[
            instruction, num_examples, zero_shot,
            task, language, dataset, prefix_selection, context_selection, examples_selection, output_selection,
            text, question, context, sentence, hypothesis, premise
        ],
        outputs=[prompt]
    )
    config_recommendation.click(
        recommend_config,
        inputs=[
            task,
            language,
            model_type
        ],
        outputs=[config_prompt]
    )
if __name__ == '__main__':
    demo.launch()
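
# Running the script launches the demo on a local Gradio URL; demo.launch(share=True)
# would additionally create a temporary public share link.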