Jon Gauthier committed
Commit 5c02ba4 · 1 Parent(s): 2603993

scaffold test script, partway there for actual prediction evaluation

Files changed (1): test.py +15 -1
test.py CHANGED
@@ -1,16 +1,30 @@
 import datasets
 import transformers
+import torch
 
 
 dataset = datasets.load_dataset("syntaxgym", "mvrr_mod")
 metric = datasets.load_metric("syntaxgym", "mvrr_mod")
 
+tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
+# DEV
+tokenizer.pad_token = tokenizer.eos_token
+
 model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")
+model.eval()
 
 
 for item in dataset["test"]:
-    # TODO evaluate surprisals
+    # TODO full preprocessing setup
+    tokenized = tokenizer(item["conditions"]["content"], return_tensors="pt",
+                          padding=True)
 
     print(item)
+    print(tokenized)
+
+    with torch.no_grad():
+        print(model(tokenized["input_ids"]))
 
+    # TODO eval
+
     break
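
The `# TODO eval` comment marks the part of the scaffold that is still missing: turning the model outputs into per-token surprisals. As a rough sketch of what that step could look like (not part of this commit; the helper name, the log base, and the padding-mask handling are all assumptions), surprisals can be read off the causal LM logits like this:

import torch
import torch.nn.functional as F

def token_surprisals(model, tokenized):
    """Hypothetical helper: per-token surprisals (in bits) for a padded batch."""
    with torch.no_grad():
        logits = model(tokenized["input_ids"],
                       attention_mask=tokenized["attention_mask"]).logits
    # Token t is predicted from tokens < t, so align logits[:, t-1] with input_ids[:, t].
    log_probs = F.log_softmax(logits[:, :-1, :], dim=-1)
    targets = tokenized["input_ids"][:, 1:].unsqueeze(-1)
    nats = -log_probs.gather(-1, targets).squeeze(-1)
    surprisals = nats / torch.log(torch.tensor(2.0))  # convert nats to bits
    # Zero out positions that are padding in the target sequence.
    return surprisals * tokenized["attention_mask"][:, 1:]

Summing these surprisals over the tokens of each region would then give the region-level quantities that a SyntaxGym-style evaluation compares across conditions; how that aggregation is wired into the `metric` object is left open here, matching the commit message's "partway there" framing.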