CCCCCC committed
Commit e232fd5 · 1 parent: 7671053

Update app.py

Files changed (1):
app.py: +4 -6
app.py CHANGED
@@ -10,11 +10,9 @@ model_path = 'THUDM/BPO'
 
 device = 'cuda'
 
-if torch.cuda.is_available():
-
-    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, add_prefix_space=True)
-    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, device_map=device, load_in_8bit=True)
-    model = model.eval()
+tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, add_prefix_space=True)
+model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, device_map=device, load_in_8bit=True)
+model = model.eval()
 
 
 DESCRIPTION = """This Space demonstrates model [BPO](https://huggingface.co/THUDM/BPO), which is built on LLaMA-2-7b-chat.
@@ -137,7 +135,7 @@ def reset_state():
 def update_textbox_from_dropdown(selected_example):
     return selected_example
 
-with gr.Blocks("sty.css") as demo:
+with gr.Blocks(css="sty.css") as demo:
     gr.HTML("""<h1 align="center">Prompt Preference Optimizer</h1>""")
 
     gr.Markdown(DESCRIPTION)
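
For context, below is a minimal sketch of how the updated setup would read in app.py after this commit. Only the added lines come from the hunks above; the imports and their exact form (torch, gradio as gr, and the transformers auto classes) sit outside the hunks shown and are assumptions here.

import torch  # assumed import; used by the removed cuda check, not shown in the hunks
import gradio as gr  # assumed import; gr.Blocks is used later in the file
from transformers import AutoTokenizer, AutoModelForCausalLM  # assumed import

model_path = 'THUDM/BPO'
device = 'cuda'

# The torch.cuda.is_available() guard is dropped: the model is now loaded
# unconditionally onto the GPU in 8-bit (load_in_8bit=True relies on bitsandbytes).
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, add_prefix_space=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, device_map=device, load_in_8bit=True)
model = model.eval()

The second hunk fixes how the stylesheet is passed to Gradio: gr.Blocks accepts custom CSS through its css keyword argument, so gr.Blocks(css="sty.css") hands the stylesheet to the intended parameter, whereas the earlier positional form gr.Blocks("sty.css") would have bound the string to a different parameter.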