WYBar committed
Commit b9f70fd · 1 Parent: 9a8a8da

reset to resize
Files changed (3):
  1. app.py +7 -6
  2. app_test.py +5 -5
  3. modeling_crello.py +4 -3
app.py CHANGED
@@ -283,6 +283,7 @@ def buildmodel(**kwargs):
         # resume,
         # config=model_args
     # ).to(device)
+
     model = CrelloModel.from_pretrained(
         "WYBar/LLM_For_Layout_Planning",
         subfolder="checkpoint-26000", # load the checkpoint directory
@@ -303,8 +304,8 @@ def buildmodel(**kwargs):
 def construction_layout():
     params_dict = {
         # needs to be modified
-        "input_model": "WYBar/LLM_For_Layout_Planning",
-        "resume": "WYBar/LLM_For_Layout_Planning",
+        "input_model": "/openseg_blob/v-sirui/temporary/2024-02-21/Layout_train/COLEv2/Design_LLM/checkpoint/Meta-Llama-3-8B",
+        "resume": "/openseg_blob/v-sirui/temporary/2024-02-21/SVD/Int2lay_1016/checkpoint/int2lay_1031/1031_test/checkpoint-26000/",
 
         "seed": 0,
         "mask_values": False,
@@ -320,10 +321,10 @@ def construction_layout():
     # Init model
     model, quantizer, tokenizer = buildmodel(**params_dict)
 
-    print('resize token embeddings to match the tokenizer', 129423)
-    model.lm.resize_token_embeddings(129423)
-    model.input_embeddings = model.lm.get_input_embeddings()
-    print('after token embeddings to match the tokenizer', 129423)
+    # print('resize token embeddings to match the tokenizer', 129423)
+    # model.lm.resize_token_embeddings(129423)
+    # model.input_embeddings = model.lm.get_input_embeddings()
+    # print('after token embeddings to match the tokenizer', 129423)
 
     print("before .to(device)")
     model = model.to(device)
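The lines commented out above used to resize the language model's embedding table by hand, with the vocabulary size hard-coded as 129423; this commit moves that step into CrelloModel's constructor (see modeling_crello.py below). For reference, a minimal sketch of the general resize pattern on a stock transformers model — the gpt2 checkpoint and the `<layout>` token are placeholders, not what this repo uses:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; the app actually loads CrelloModel instead.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Extending the tokenizer (e.g. with layout tokens) grows the vocabulary,
# so the embedding matrix must be resized to match it.
tokenizer.add_special_tokens({"additional_special_tokens": ["<layout>"]})
model.resize_token_embeddings(len(tokenizer))

# One embedding row per tokenizer entry.
assert model.get_input_embeddings().weight.shape[0] == len(tokenizer)
```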
app_test.py CHANGED
@@ -283,7 +283,7 @@ def buildmodel(**kwargs):
         # resume,
         # config=model_args
     # ).to(device)
-
+
     model = CrelloModel.from_pretrained(
         "WYBar/LLM_For_Layout_Planning",
         subfolder="checkpoint-26000", # load the checkpoint directory
@@ -321,10 +321,10 @@ def construction_layout():
     # Init model
     model, quantizer, tokenizer = buildmodel(**params_dict)
 
-    print('resize token embeddings to match the tokenizer', 129423)
-    model.lm.resize_token_embeddings(129423)
-    model.input_embeddings = model.lm.get_input_embeddings()
-    print('after token embeddings to match the tokenizer', 129423)
+    # print('resize token embeddings to match the tokenizer', 129423)
+    # model.lm.resize_token_embeddings(129423)
+    # model.input_embeddings = model.lm.get_input_embeddings()
+    # print('after token embeddings to match the tokenizer', 129423)
 
     print("before .to(device)")
     model = model.to(device)
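Both app scripts load the model the same way. Assuming CrelloModel subclasses PreTrainedModel (as the class definition in modeling_crello.py indicates), the `subfolder` argument makes `from_pretrained` read the weights from a subdirectory of the Hub repo rather than its root — a sketch, with the device handling from the scripts:

```python
import torch
from modeling_crello import CrelloModel  # local module edited in this commit

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# subfolder= points from_pretrained at a subdirectory of the Hub repo,
# here the checkpoint-26000 training snapshot.
model = CrelloModel.from_pretrained(
    "WYBar/LLM_For_Layout_Planning",
    subfolder="checkpoint-26000",
)
model = model.to(device)  # mirrors the "before .to(device)" step above
```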
modeling_crello.py CHANGED
@@ -181,9 +181,10 @@ class CrelloModel(PreTrainedModel):
         self.lm.train()
         self.lm.config.gradient_checkpointing = True
 
-        # print('resize token embeddings to match the tokenizer', config.vocab_size)
-        # self.lm.resize_token_embeddings(config.vocab_size)
-        # self.input_embeddings = self.lm.get_input_embeddings()
+        print('resize token embeddings to match the tokenizer', config.vocab_size)
+        self.lm.resize_token_embeddings(config.vocab_size)
+        self.input_embeddings = self.lm.get_input_embeddings()
+        print('after token embeddings to match the tokenizer', config.vocab_size)
 
     def train(self, mode=True):
         super().train(mode=mode)
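Re-enabling the resize inside `__init__` and keying it to `config.vocab_size` means the embedding matrix already has the checkpoint's shape when `from_pretrained` loads the state dict, so callers like app.py no longer need the hard-coded 129423. A sketch of that pattern, under the assumption that the config names the backbone in a field here called `lm_name_or_path` (hypothetical; CrelloModel's real field may differ):

```python
from transformers import AutoModelForCausalLM, PretrainedConfig, PreTrainedModel

class LayoutLMWrapper(PreTrainedModel):
    """Illustrative only; CrelloModel's real __init__ does more setup."""

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        # Hypothetical config field naming the backbone checkpoint.
        self.lm = AutoModelForCausalLM.from_pretrained(config.lm_name_or_path)
        # Resizing here ties the embedding table to config.vocab_size on
        # every construction path, including from_pretrained, so the shapes
        # match the saved checkpoint before its state dict is loaded.
        self.lm.resize_token_embeddings(config.vocab_size)
        self.input_embeddings = self.lm.get_input_embeddings()
```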