Foreshhh committed
Commit 1ccfd50 · Parent: 77369aa

update space

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -216,13 +216,15 @@ PROMPT_MAP = {
 MODEL_ID = "OpenSafetyLab/MD-Judge-v0.1"
 # MODEL_ID = "/mnt/hwfile/trustai/huxuhao/MD-Judge-v0.1"
 
+tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
+
 def call_llama_guard_api(question: str, answer: str, evaluation_prompt_select: str):
     if torch.cuda.is_available():
         device = 'cuda'
     else:
         device = 'cpu'
 
-
     if 'Domain' in evaluation_prompt_select:
         evaluation_propmt = MD_DOMAIN_PROMPT
     elif 'Task' in evaluation_prompt_select:
@@ -233,8 +235,7 @@ def call_llama_guard_api(question: str, answer: str, evaluation_prompt_select: s
         evaluation_propmt = LlamaGuard2_PROMPT
 
 
-    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
-    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
+
     prompt = evaluation_propmt.strip() % (question.strip(), answer.strip())
     inputs = tokenizer(
         prompt, return_tensors="pt", add_special_tokens=True
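The net effect of this commit is that the MD-Judge tokenizer and model are now constructed once at module level instead of inside call_llama_guard_api, so the weights are loaded a single time when the Space starts rather than on every request. Below is a minimal sketch of the resulting structure; EXAMPLE_PROMPT and the generate/decode step are hypothetical simplifications standing in for the real prompt constants and generation code defined elsewhere in app.py.

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    MODEL_ID = "OpenSafetyLab/MD-Judge-v0.1"

    # Loaded once at import time (the change in this commit): repeated calls to
    # call_llama_guard_api reuse the same tokenizer and weights instead of
    # re-instantiating them inside the function on every request.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

    # Hypothetical stand-in for the MD_DOMAIN_PROMPT / LlamaGuard2_PROMPT templates
    # selected earlier in app.py; only the %-interpolation shape matters here.
    EXAMPLE_PROMPT = "Question: %s\nAnswer: %s"

    def call_llama_guard_api(question: str, answer: str) -> str:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        prompt = EXAMPLE_PROMPT.strip() % (question.strip(), answer.strip())
        inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=True).to(device)
        outputs = model.to(device).generate(**inputs, max_new_tokens=32)
        # Return only the newly generated tokens (the judge's verdict text).
        return tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

Loading with from_pretrained at module scope is the usual pattern for a Gradio Space: the startup cost is paid once, and each request only runs tokenization and generation.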