msy127 committed on
Commit
7326dbf
·
1 Parent(s): 6783a3d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -1,16 +1,16 @@
1
  # ๋ชจ๋ธ ๋กœ๋”ฉ
2
- import torch
3
- from peft import PeftConfig, PeftModel
4
- from transformers import AutoModelForCausalLM, AutoTokenizer
5
 
6
- device = "cuda" if torch.cuda.is_available() else "cpu"
7
 
8
- base_model_name = "facebook/opt-350m"
9
- adapter_model_name = 'msy127/opt-350m-aihubqa-130-dpo-adapter'
10
 
11
- model = AutoModelForCausalLM.from_pretrained(base_model_name)
12
- model = PeftModel.from_pretrained(model, adapter_model_name).to(device)
13
- tokenizer = AutoTokenizer.from_pretrained(adapter_model_name)
14
 
15
  # 대화 누적 함수 (history) - prompt 자리에 history가 들어감 -> dialoGPT는 모델 집어넣기 전에 인코딩을 했었는데 OPENAI는 인코딩을 안한다.
16
 
 
1
  # ๋ชจ๋ธ ๋กœ๋”ฉ
2
+ # import torch
3
+ # from peft import PeftConfig, PeftModel
4
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
5
 
6
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
7
 
8
+ # base_model_name = "facebook/opt-350m"
9
+ # adapter_model_name = 'msy127/opt-350m-aihubqa-130-dpo-adapter'
10
 
11
+ # model = AutoModelForCausalLM.from_pretrained(base_model_name)
12
+ # model = PeftModel.from_pretrained(model, adapter_model_name).to(device)
13
+ # tokenizer = AutoTokenizer.from_pretrained(adapter_model_name)
14
 
15
  # 대화 누적 함수 (history) - prompt 자리에 history가 들어감 -> dialoGPT는 모델 집어넣기 전에 인코딩을 했었는데 OPENAI는 인코딩을 안한다.
16