venkyyuvy committed on
Commit
7cfe36b
·
1 Parent(s): 6235c14
Files changed (3) hide show
  1. app.py +4 -2
  2. gpt.py +1 -3
  3. shakespeare.pt → shakespeare_lm.pt +0 -0
app.py CHANGED
@@ -1,19 +1,21 @@
1
  import pickle
2
  import torch
3
  import gradio as gr
4
- from gpt import GPTLanguageModel, encode, decode
5
 
6
 
7
  with open('stoi_itos.pkl', 'rb') as file:
8
  stoi, itos = pickle.load(file)
9
 
 
 
10
  lm = GPTLanguageModel()
11
  lm.load_state_dict(torch.load('shakespeare_lm.pt', map_location='cpu'))
12
  lm.eval()
13
 
14
 
15
  def inference(prompt: str):
16
- encoded_prompt = torch.tensor(encode(prompt), dtype=torch.long)
17
  output = decode(lm.generate(encoded_prompt, max_new_tokens=500)[0].tolist())
18
  return output
19
 
 
1
import pickle

import torch
import gradio as gr

from gpt import GPTLanguageModel

# Load the character-level vocabulary mappings produced at training time:
# stoi maps character -> token id, itos maps token id -> character.
# NOTE(review): pickle.load executes arbitrary code from the file — fine for
# this trusted local artifact, but never load untrusted pickles.
with open('stoi_itos.pkl', 'rb') as file:
    stoi, itos = pickle.load(file)


def encode(text):
    """Encode a string into a list of integer token ids (one id per character).

    Raises KeyError if *text* contains a character absent from the
    training vocabulary.
    """
    return [stoi[c] for c in text]


def decode(token_ids):
    """Decode a list of integer token ids back into a string."""
    return ''.join(itos[i] for i in token_ids)


# Instantiate the model and restore the trained weights on CPU
# (map_location='cpu' so the app also runs on GPU-less hosts).
lm = GPTLanguageModel()
lm.load_state_dict(torch.load('shakespeare_lm.pt', map_location='cpu'))
lm.eval()  # disable dropout / switch batch-norm-style layers to eval mode
15
 
16
 
17
def inference(prompt: str) -> str:
    """Generate 500 new tokens of text continuing *prompt*.

    The prompt is character-encoded, given a batch dimension, passed to the
    language model's ``generate`` method, and the resulting token ids are
    decoded back into a string (the returned text includes the prompt).
    """
    # (T,) -> (1, T): the model expects a leading batch dimension.
    encoded_prompt = torch.tensor(encode(prompt), dtype=torch.long).unsqueeze(0)
    # Inference only — disable autograd bookkeeping during generation.
    with torch.no_grad():
        generated = lm.generate(encoded_prompt, max_new_tokens=500)
    return decode(generated[0].tolist())
21
 
gpt.py CHANGED
@@ -9,9 +9,7 @@ from config import gpt_config as config
9
 
10
  torch.manual_seed(1337)
11
 
12
-
13
- encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers
14
- decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
15
 
16
 
17
  class Head(nn.Module):
 
9
 
10
# Fixed seed so sampling/generation is reproducible across runs.
torch.manual_seed(1337)

# NOTE(review): presumably the number of distinct characters in the
# Shakespeare training corpus; must match the embedding/output size of the
# saved checkpoint ('shakespeare_lm.pt') — verify against the training data.
vocab_size = 65
 
 
13
 
14
 
15
  class Head(nn.Module):
shakespeare.pt → shakespeare_lm.pt RENAMED
File without changes