import torch
import transformers
import gradio as gr
from transformers import AutoTokenizer

# Run everything on the GPU by default.
torch.set_default_device("cuda")


# Fine-tuned Phi-2 checkpoint hosted on the Hugging Face Hub.
model_name = "deepapaikar/katzbot-phi2"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
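# Note: device_map="auto" relies on the `accelerate` package being installed.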

tokenizer = AutoTokenizer.from_pretrained(model_name)


def predict_answer(question, max_new_tokens=25):
    messages = [{"role": "user", "content": question}]

    # Format the question with the model's chat template.
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # return_full_text=False keeps the echoed prompt out of the answer.
    outputs = pipeline(prompt, max_new_tokens=max_new_tokens, do_sample=True,
                       temperature=0.7, top_k=50, top_p=0.95, return_full_text=False)

    return outputs[0]["generated_text"].strip()



def gradio_predict(question, token):
    # Thin wrapper so the slider value is forwarded as the token budget.
    return predict_answer(question, token)

# Define the Gradio interface
iface = gr.Interface(
    fn=gradio_predict,
    inputs=[
        gr.Textbox(label="Question", placeholder="e.g. Where is Yeshiva University located?", scale=4),
        gr.Slider(2, 100, value=25, label="Token Count", info="Choose between 2 and 100"),
    ],
    outputs=gr.TextArea(label="Answer"),
    title="KatzBot",
    description="Phi2-trial1",
)

# Launch the app
iface.queue().launch(debug=True)
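
# Quick sanity check without the UI, reusing the placeholder question above
# (an illustrative call, not part of the app itself):
#
#   print(predict_answer("Where is Yeshiva University located?", max_new_tokens=40))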