TharunSiva committed on
Commit
2c759c2
·
1 Parent(s): 971ab07
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -6,13 +6,13 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
6
  model = GPTLanguageModel().to(DEVICE)
7
  model.load_state_dict(torch.load("mini-gpt.pth",map_location=DEVICE), strict=False)
8
  model.eval()
9
- answer = decode(model.generate(context, max_new_tokens=1000)[0].tolist())
10
 
11
  def display(text,number):
12
  combined_text = text + answer[:number + 1]
13
  return combined_text
14
 
15
  input_box = gr.Textbox()
16
- input_slider = gr.Slider(minimum=500, maximum=1000, default=500, label="Select the maxium number of tokens/words:")
17
  output_text = gr.Textbox()
18
  gr.Interface(fn=display, inputs=[input_box,input_slider], outputs=output_text).launch()
 
6
# --- Model setup (runs once at import time) ---
# Instantiate the model and move it to the device selected above.
model = GPTLanguageModel().to(DEVICE)
# Load pretrained weights, remapping tensors onto DEVICE so a CPU-only host
# can load a GPU-saved checkpoint.
# NOTE(review): strict=False silently ignores missing/unexpected keys in the
# checkpoint — confirm this is intentional, as a mismatched checkpoint would
# otherwise go undetected.
model.load_state_dict(torch.load("mini-gpt.pth", map_location=DEVICE), strict=False)
model.eval()

# Pre-generate one long completion up front; display() serves prefixes of it.
# NOTE(review): `context` must be defined earlier in the file — confirm.
answer = decode(model.generate(context, max_new_tokens=3000)[0].tolist())
 
11
def display(text, number):
    """Return the user's text followed by the first ``number + 1`` characters
    of the pre-generated module-level ``answer``.

    NOTE(review): the slider is labelled "tokens/words", but this slices
    *characters* of the decoded string — confirm the intended unit.
    """
    # Gradio sliders can deliver floats; slice indices must be ints,
    # so cast before slicing.
    combined_text = text + answer[: int(number) + 1]
    return combined_text
14
 
15
# --- Gradio UI wiring (launches the app at import time) ---
input_box = gr.Textbox()
# Initial slider position is set with `value=`; the old `default=` keyword
# is not a gr.Slider parameter in current Gradio and raises a TypeError.
input_slider = gr.Slider(
    minimum=500,
    maximum=2000,
    value=500,
    label="Select the maximum number of tokens/words:",
)
output_text = gr.Textbox()
gr.Interface(fn=display, inputs=[input_box, input_slider], outputs=output_text).launch()