TharunSiva committed on
Commit
971ab07
·
1 Parent(s): 1f156ff
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -5,13 +5,14 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
5
 
6
  model = GPTLanguageModel().to(DEVICE)
7
  model.load_state_dict(torch.load("mini-gpt.pth",map_location=DEVICE), strict=False)
8
-
9
  answer = decode(model.generate(context, max_new_tokens=1000)[0].tolist())
10
 
11
- def display(number):
12
- return answer[:number+1]
 
13
 
 
14
  input_slider = gr.Slider(minimum=500, maximum=1000, default=500, label="Select the maxium number of tokens/words:")
15
  output_text = gr.Textbox()
16
- demo = gr.Interface(fn=display, inputs=input_slider, outputs=output_text)
17
- demo.launch()
 
5
 
6
# Load the trained mini-GPT weights and prepare the model for inference.
model = GPTLanguageModel().to(DEVICE)
# NOTE(review): strict=False silently ignores missing/unexpected keys in the
# checkpoint -- confirm the state dict really matches this architecture.
model.load_state_dict(torch.load("mini-gpt.pth", map_location=DEVICE), strict=False)
model.eval()  # inference mode: disables dropout etc.

# Generate the full 1000-token answer once at startup; the UI only slices it.
# no_grad() avoids building an autograd graph (saves memory/time) during
# pure inference.
with torch.no_grad():
    answer = decode(model.generate(context, max_new_tokens=1000)[0].tolist())
10
 
11
def display(text, number):
    """Return *text* with the first ``number + 1`` characters of the
    pre-generated ``answer`` appended.

    Args:
        text: User-supplied prefix from the Gradio textbox.
        number: Slider value. Gradio sliders can deliver floats, so the
            value is coerced to ``int`` before slicing.

    Returns:
        The concatenated string shown in the output textbox.
    """
    # int() guards against a float slider value, which would make the
    # slice ``answer[:number + 1]`` raise TypeError.
    return text + answer[: int(number) + 1]
14
 
15
# Build the Gradio UI: free-text prefix + token-count slider -> generated text.
input_box = gr.Textbox()
# NOTE(review): Gradio 3+ uses `value=` (not `default=`) for a slider's
# initial position -- confirm against the pinned gradio version before
# changing; `default=` is kept here for backward compatibility.
input_slider = gr.Slider(
    minimum=500,
    maximum=1000,
    default=500,
    label="Select the maximum number of tokens/words:",  # fixed typo "maxium"
)
output_text = gr.Textbox()
gr.Interface(fn=display, inputs=[input_box, input_slider], outputs=output_text).launch()