from transformers import AutoModelForCausalLM, AutoTokenizer
import transformers
import torch
import gradio as gr

# Create new tensors on the GPU by default (requires a CUDA-capable device)
torch.set_default_device("cuda")

# Hugging Face Hub ID of the fine-tuned KatzBot checkpoint (Phi-2 based)
model = "deepapaikar/katzbot-phi2"

# Text-generation pipeline in half precision; device_map="auto" places the
# model weights across the available device(s)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Tokenizer matching the checkpoint, used to build the chat prompt
tokenizer = AutoTokenizer.from_pretrained(model)
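
# Note: AutoModelForCausalLM is imported above but not used directly here. As an
# alternative sketch (an assumption, not what this script does), the checkpoint
# could be loaded explicitly and handed to the pipeline:
#   loaded_model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.float16, device_map="auto")
#   pipeline = transformers.pipeline("text-generation", model=loaded_model, tokenizer=tokenizer)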


def predict_answer(question, token=25):
    # Wrap the question as a single user turn in the chat message format
    messages = [{"role": "user", "content": f"{question}"}]

    # Render the messages with the chat template, appending the generation
    # prompt so the model answers as the assistant
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Sample up to `token` new tokens with temperature, top-k and top-p sampling
    outputs = pipeline(prompt, max_new_tokens=token, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

    # The pipeline returns the prompt followed by the generated continuation
    return outputs[0]["generated_text"]
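
# Example call (a sketch; assumes the checkpoint downloads from the Hub and a GPU is available):
#   predict_answer("Where is Yeshiva University located?", token=50)
# returns the rendered prompt followed by the sampled answer text.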


def gradio_predict(question, token):
    # The slider delivers a numeric value; cast to int before generation
    answer = predict_answer(question, int(token))
    return answer


# Gradio UI: a question box plus a slider controlling how many new tokens to generate
iface = gr.Interface(
    fn=gradio_predict,
    inputs=[gr.Textbox(label="Question", placeholder="e.g. Where is Yeshiva University located?", scale=4),
            gr.Slider(2, 100, value=25, label="Token Count", info="Choose between 2 and 100")],
    outputs=gr.TextArea(label="Answer"),
    title="KatzBot",
    description="Phi2-trial1",
)
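
# queue() makes concurrent requests wait their turn instead of hitting the model
# simultaneously; debug=True blocks the main thread and surfaces errors in the
# console (handy when running in a notebook).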
iface.queue().launch(debug=True)