ChromiumPlutoniumAI committed
Commit 1f319a5 · verified · 1 Parent(s): e959424

Create inference.py

Files changed (1):
  inference.py (+18, -0)
inference.py ADDED
@@ -0,0 +1,18 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+def generate_text(prompt, max_length=100):
+    model = AutoModelForCausalLM.from_pretrained("./results")
+    tokenizer = AutoTokenizer.from_pretrained("./results")
+
+    inputs = tokenizer(prompt, return_tensors="pt")
+    outputs = model.generate(
+        **inputs,
+        max_length=max_length,
+        num_return_sequences=1,
+        temperature=0.7,
+        top_p=0.9,
+        do_sample=True
+    )
+
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
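
For reference, a minimal usage sketch (not part of the commit): it assumes a fine-tuned causal LM and its tokenizer were previously saved to ./results with save_pretrained, and that the caller sits next to inference.py; the prompt and max_length values are purely illustrative.

# Hypothetical caller, assuming inference.py is importable and
# ./results holds a saved model and tokenizer.
from inference import generate_text

if __name__ == "__main__":
    # Illustrative prompt; any string works.
    print(generate_text("Once upon a time", max_length=60))

One design note: as written, generate_text reloads the model and tokenizer from ./results on every call, which is simple but slow; callers generating repeatedly may want to hoist the from_pretrained loads out of the function and reuse the loaded objects.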