sergiopperez committed on
Commit
6a62ad4
·
1 Parent(s): acdb2ff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -4
app.py CHANGED
@@ -1,15 +1,22 @@
1
  import gradio as gr
2
  import torch
3
- from transformers import pipeline, GPTJForCausalLM
4
 
5
- # load fp 16 model
6
- model = GPTJForCausalLM.from_pretrained("models/hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
7
 
8
- config = AutoConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", name_or_path="adapter_model.bin")
 
 
 
 
 
 
9
 
10
  # load tokenizer
11
  tokenizer = AutoTokenizer.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
12
 
 
 
 
13
  # create pipeline
14
  pipe = pipeline("text-generation", model=model, config=config, tokenizer=tokenizer, device=0,)
15
 
 
1
  import gradio as gr
2
  import torch
3
+ from transformers import pipeline, GPTJForCausalLM, PeftModel, PeftConfig, AutoModelForCausalLM
4
 
 
 
5
 
6
+ config = PeftConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
7
+ model = AutoModelForCausalLM.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", return_dict=True, load_in_8bit=True, device_map='auto')
8
+
9
+ # # load fp 16 model
10
+ # model = GPTJForCausalLM.from_pretrained("models/hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
11
+
12
+ # config = AutoConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", name_or_path="adapter_model.bin")
13
 
14
  # load tokenizer
15
  tokenizer = AutoTokenizer.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
16
 
17
+ # Load the Lora model
18
+ model = PeftModel.from_pretrained(model, "hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
19
+
20
  # create pipeline
21
  pipe = pipeline("text-generation", model=model, config=config, tokenizer=tokenizer, device=0,)
22