leonzoopl committed on
Commit
a3edc83
·
verified ·
1 Parent(s): 0e21da9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -8
app.py CHANGED
@@ -1,13 +1,26 @@
1
  import gradio as gr
2
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
3
 
4
- model_name = "speakleash/Bielik-1.5B-v3"
5
- tokenizer = GPT2Tokenizer.from_pretrained(model_name)
6
- model = GPT2LMHeadModel.from_pretrained(model_name)
7
 
8
- def generate_text(prompt):
9
- input_ids = tokenizer.encode(prompt, return_tensors="pt")
10
- output = model.generate(input_ids, max_length=100, num_return_sequences=1)
 
 
 
 
 
 
 
 
 
 
 
 
11
  return tokenizer.decode(output[0], skip_special_tokens=True)
12
 
13
- gr.Interface(fn=generate_text, inputs="text", outputs="text", title="Polish GPT-2 Demo").launch()
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import torch
4
 
5
# Hugging Face Hub identifier of the model to serve.
model_name = "speakleash/Bielik-1.5B-v3.0-Instruct-FP8-Dynamic"

# Load the tokenizer and model weights from the Hub.
# NOTE(review): the checkpoint name says FP8-Dynamic, but it is loaded with
# torch_dtype=torch.float32 — confirm the installed transformers/quantization
# backend actually supports dequantizing this checkpoint on the target host.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
11
+
12
# Response-generation function wired into the Gradio interface.
def generate(prompt):
    """Generate a sampled continuation of *prompt* with the loaded model.

    Args:
        prompt: Raw user text to feed to the model.

    Returns:
        The decoded generation (special tokens stripped). Note the decoded
        string includes the prompt itself, since the full output sequence
        is decoded.
    """
    # Keep the whole encoding: the original dropped attention_mask by taking
    # only .input_ids, which transformers warns about and which can produce
    # unreliable generations when the pad token equals the eos token.
    encoded = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # inference only — no autograd bookkeeping
        output = model.generate(
            input_ids=encoded.input_ids,
            attention_mask=encoded.attention_mask,
            max_new_tokens=150,
            temperature=0.7,
            top_p=0.95,
            do_sample=True,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
24
 
25
+ # Interfejs Gradio
26
+ gr.Interface