rdlf committed on
Commit
65e2225
·
verified ·
1 Parent(s): 052012f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -17
app.py CHANGED
@@ -4,24 +4,30 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer
4
  tokenizer = GPT2Tokenizer.from_pretrained("sberbank-ai/mGPT")
5
  model = GPT2LMHeadModel.from_pretrained("sberbank-ai/mGPT")
6
 
7
- # Input text
8
- text = "Александр Сергеевич Пушкин родился в "
9
-
10
- # Encode the input text
11
- input_ids = tokenizer.encode(text, return_tensors="pt")
12
 
13
  # Generate text
14
- out = model.generate(
15
- input_ids,
16
- min_length=100,
17
- max_length=100,
18
- eos_token_id=5,
19
- pad_token_id=1,
20
- top_k=10,
21
- top_p=0.0,
22
- no_repeat_ngram_size=5
23
- )
24
 
25
  # Decode the generated output
26
- generated_text = list(map(tokenizer.decode, out))[0]
27
- print(generated_text)
 
 
 
 
 
 
 
 
 
4
# Load the multilingual GPT (mGPT) checkpoint once at module import so the
# Gradio handler below can reuse the same tokenizer/model across requests.
_CHECKPOINT = "sberbank-ai/mGPT"
tokenizer = GPT2Tokenizer.from_pretrained(_CHECKPOINT)
model = GPT2LMHeadModel.from_pretrained(_CHECKPOINT)
6
 
7
def eval_aguila(text):
    """Generate a continuation of *text* with the mGPT model.

    Parameters
    ----------
    text : str
        Prompt to continue (Gradio passes the textbox contents).

    Returns
    -------
    str
        ``"Result: <generated text>"`` where ``<generated text>`` is the
        decoded model output (the prompt is included in the decoded text).
    """
    # Encode the prompt into token ids as a PyTorch tensor (batch of 1).
    input_ids = tokenizer.encode(text, return_tensors="pt")

    # Generate a sequence capped at 100 tokens (min_length == max_length
    # forces exactly that length unless EOS handling intervenes).
    # NOTE(review): generate() defaults to greedy decoding, so top_k/top_p
    # have no effect unless do_sample=True is added — confirm intent.
    out = model.generate(
        input_ids,
        min_length=100,
        max_length=100,
        eos_token_id=5,
        pad_token_id=1,
        top_k=10,
        top_p=0.0,
        no_repeat_ngram_size=5,
    )

    # Decode the first (and only) sequence in the returned batch.
    generated_text = list(map(tokenizer.decode, out))[0]
    print(generated_text)

    # BUG FIX: the original returned f"Result: {generation[0]['generated_text']}"
    # — `generation` is undefined in this file, so every call raised NameError.
    # Return the decoded text computed above instead.
    return f"Result: {generated_text}"
29
+
30
+
31
# Wire the generation function into a simple text-in/text-out Gradio UI.
# BUG FIX: the original passed fn=lecturabilidad, a name that does not exist
# anywhere in this file (NameError at startup); the handler defined above
# is eval_aguila.
# NOTE(review): title="Mixtral" does not match the mGPT model loaded above —
# looks like a copy-paste leftover; confirm the intended title.
demo = gr.Interface(fn=eval_aguila, inputs="text", outputs="text", title="Mixtral")

# share=True exposes a temporary public Gradio link in addition to localhost.
demo.launch(share=True)