import gradio as gr
import torch

from model import *

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the trained weights and switch the model to inference mode.
model = GPTLanguageModel().to(DEVICE)
model.load_state_dict(torch.load("mini-gpt.pth", map_location=DEVICE), strict=False)
print("Model Loaded")
model.eval()

# Seed generation with a single zero token (assumed to be the start token, as in the
# standard mini-GPT setup) and pre-generate the full story once at startup.
context = torch.zeros((1, 1), dtype=torch.long, device=DEVICE)
answer = decode(model.generate(context, max_new_tokens=1000)[0].tolist())
print("Answer Generated")

def display(text, number):
    # Prepend the prompt text to the first `number` characters of the pre-generated story.
    combined_text = text + answer[:number + 1]
    return combined_text

input_box = gr.Textbox(label="Story Lines", value="Once Upon a Time")
input_slider = gr.Slider(minimum=500, maximum=1000, step=100, label="Select the maximum number of tokens/words:")
output_text = gr.Textbox()
gr.Interface(fn=display, inputs=[input_box, input_slider], outputs=output_text).launch(debug=True)