prithivMLmods committed
Commit cd0903f (verified)
Parent: 482ab3e

Update app.py

Files changed (1): app.py (+23, -18)
app.py CHANGED
@@ -75,12 +75,12 @@ def orthogonalize_matrix(matrix, vec, weight):
 # --- Streamlit UI ---
 
 st.title("LLM Auto Abliteration")
-st.markdown("🥠Recommended to run edge-device LLMs (e.g., 1B, 1.5B, 0.5B).")
-st.markdown("🥠And also recommended to duplicate the space for seamless usage!")
-st.markdown("🥠This app allows you to manually input parameters to modify a language model's behavior by abliterating its weights.")
-st.markdown("📍Credits: Thanks to **[Maxime Labonne](https://huggingface.co/mlabonne)**")
+st.markdown("🥠 Recommended for edge-device LLMs (e.g., 1B, 1.5B, 0.5B).")
+st.markdown("🥠 Duplicate the space for seamless usage!")
+st.markdown("🥠 This app allows you to manually input parameters to modify a language model's behavior by abliterating its weights.")
+st.markdown("📍 Credits: Thanks to **[Maxime Labonne](https://huggingface.co/mlabonne)**")
 
-# Debugging window (will update logs during the process)
+# Debugging window to show log messages
 debug_log = []
 debug_placeholder = st.empty()
 def update_debug(msg):
@@ -132,14 +132,20 @@ if st.button("Run Abliteration"):
 
     with st.spinner("Loading model..."):
         try:
+            # Pass HF token if provided to load private or restricted models
            model = AutoModelForCausalLM.from_pretrained(
                MODEL_ID,
                device_map="auto",
                torch_dtype=torch_dtype,
                attn_implementation=attn_implementation,
-                trust_remote_code=True
+                trust_remote_code=True,
+                use_auth_token=hf_token if hf_token else None
            ).eval()
-            tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
+            tokenizer = AutoTokenizer.from_pretrained(
+                MODEL_ID,
+                trust_remote_code=True,
+                use_auth_token=hf_token if hf_token else None
+            )
            update_debug("Model and tokenizer loaded successfully.")
        except Exception as e:
            update_debug(f"Error loading model: {e}")
@@ -252,17 +258,16 @@ if st.button("Run Abliteration"):
     st.text_area("Response after abliteration:", response_after, height=150)
     update_debug("Generated response after abliteration.")
 
-    st.write("### (Optional) Pushing Model to Hugging Face Hub")
-    if st.checkbox("Push model to HF Hub?"):
-        try:
-            model_name = MODEL_ID.split("/")[-1] + "-abliterated"
-            model.push_to_hub(model_name, private=PRIVATE_UPLOAD)
-            tokenizer.push_to_hub(model_name, private=PRIVATE_UPLOAD)
-            st.success(f"Model pushed as {model_name}")
-            update_debug(f"Model pushed to HF Hub as {model_name}.")
-        except Exception as e:
-            st.error(f"Error while pushing model: {e}")
-            update_debug(f"Error while pushing model: {e}")
+    st.write("### Pushing Model to Hugging Face Hub")
+    try:
+        model_name = MODEL_ID.split("/")[-1] + "-abliterated"
+        model.push_to_hub(model_name, private=PRIVATE_UPLOAD)
+        tokenizer.push_to_hub(model_name, private=PRIVATE_UPLOAD)
+        st.success(f"Model automatically pushed as {model_name}")
+        update_debug(f"Model automatically pushed to HF Hub as {model_name}.")
+    except Exception as e:
+        st.error(f"Error while pushing model: {e}")
+        update_debug(f"Error while pushing model: {e}")
 
     st.success("Abliteration process complete!")
     update_debug("Abliteration process complete.")