DarwinAnim8or committed
Commit 9d294d5 · 1 Parent(s): d791c86

Update app.py

Files changed (1)
  1. app.py +23 -22
app.py CHANGED
@@ -1,35 +1,36 @@
 import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from optimum.intel import OVModelForCausalLM
 
-from happytransformer import HappyGeneration
-
-happy_gen = HappyGeneration("GPTNEO", "DarwinAnim8or/Pythia-Greentext-1.4b")
-
-from happytransformer import GENSettings
+model_name = "DarwinAnim8or/Pythia-Greentext-1.4b"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = OVModelForCausalLM.from_pretrained(model_name, export=True)
 
 def generate(text, length=100, penalty=3, temperature=0.8, topk=40):
-    args_top_k = GENSettings(no_repeat_ngram_size=penalty, do_sample=True, top_k=topk, temperature=temperature, max_length=length, early_stopping=True)
-
-    inputText = "Write a greentext from 4chan.org. The story should be like a bullet-point list using > as the start of each line. Most greentexts are humorous or absurd in nature. Most greentexts have a twist near the end.\n"
+    input_text = "Write a greentext from 4chan.org. The story should be like a bullet-point list using > as the start of each line. Most greentexts are humorous or absurd in nature. Most greentexts have a twist near the end.\n"
 
-    if not text[0] == ">":
-        inputText += ">" + text + "\n>"
+    if not text.startswith(">"):
+        input_text += ">" + text + "\n>"
     else:
-        inputText += text + "\n>"
-
-    print(inputText)
-
-    result = happy_gen.generate_text(inputText, args=args_top_k)
-    generated_text = result.text  # returns generated text only
-
-    # replace \n with actual newlines:
-    generated_text = generated_text.replace('\\n', '\n')
-
-    # clean up formatting:
-    generated_text = generated_text.replace('>', '\n>')
-    generated_text = generated_text.replace('\\"', "\"")
-
-    generated_text = ">" + text + "\n>" + generated_text  # include our prompt in our response (partially)
+        input_text += text + "\n>"
+
+    input_ids = tokenizer.encode(input_text, return_tensors="pt")
+    input_ids = input_ids[:, :-1]  # remove the last token, which is ">"
+
+    length = length + input_ids.size(1)  # max_length counts prompt tokens, so extend it by the prompt length
+
+    output = model.generate(
+        input_ids,
+        max_length=length,
+        temperature=temperature,
+        top_k=topk,
+        do_sample=True,
+        pad_token_id=tokenizer.eos_token_id,
+        no_repeat_ngram_size=penalty,
+        early_stopping=True,
+    )
+
+    generated_text = tokenizer.decode(output[:, input_ids.size(1):][0], skip_special_tokens=True)
 
     return generated_text
 
 examples = [
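
For reference, below is a minimal usage sketch of the rewritten generate() once the imports, tokenizer/model setup, and function definition from the new app.py are in scope. The gr.Interface wiring is an assumption for illustration only: the rest of app.py (including the truncated examples list) is not shown in this diff, and the component ranges and labels are hypothetical.

# Hypothetical usage sketch (not part of the commit); assumes the new app.py
# definitions above (tokenizer, model, generate) are already loaded.

# Direct call; generate() prefixes the prompt with ">" when it is missing.
print(generate("be me, converting my Space to OpenVINO", length=80, temperature=0.7, topk=40))

# Illustrative Gradio wiring; slider ranges and labels are assumptions, and the
# input order mirrors generate(text, length, penalty, temperature, topk).
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(20, 200, value=100, step=10, label="Max length"),
        gr.Slider(1, 6, value=3, step=1, label="No-repeat n-gram size"),
        gr.Slider(0.1, 1.5, value=0.8, label="Temperature"),
        gr.Slider(1, 100, value=40, step=1, label="Top-k"),
    ],
    outputs=gr.Textbox(label="Generated greentext"),
)

if __name__ == "__main__":
    demo.launch()

Calling generate() once at startup like this is also a quick way to confirm that the export=True OpenVINO conversion succeeded before the Space serves traffic.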