OS07 committed on
Commit
62d95e2
·
verified ·
1 Parent(s): 7fc3a43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -13,6 +13,15 @@ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
13
  # Load the Lora model
14
  model = PeftModel.from_pretrained(model, peft_model_id)
15
 
 
 
 
 
 
 
 
 
 
16
  def get_result(query):
17
  pipe = pipeline("text-generation", model="OS07/Letsee", torch_dtype=torch.bfloat16, device_map="auto")
18
  prompt_template = "<|system|>\n<|end|>\n<|user|>\n{query}<|end|>\n<|assistant|>"
@@ -22,14 +31,6 @@ def get_result(query):
22
  result = generated_output_filtering(outputs)
23
  return result
24
 
25
- def generated_output_filtering(output):
26
- if len(output) > 0:
27
- str1=str(list(output[0].values()))
28
- if 'assistant' in str1:
29
- result=str1[str1.find('|assistant|')+len('|assistant|>'):]
30
- return result
31
- else:
32
- return None
33
 
34
  #result=generated_output_filtering(outputs)
35
  #result
 
13
  # Load the Lora model
14
  model = PeftModel.from_pretrained(model, peft_model_id)
15
 
16
def generated_output_filtering(output):
    """Extract the assistant's reply from a text-generation pipeline output.

    Parameters
    ----------
    output : list[dict]
        Raw output from a transformers ``pipeline("text-generation")`` call;
        the first element's values are stringified and scanned for the
        ``<|assistant|>`` marker used by the prompt template.

    Returns
    -------
    str | None
        The text following the ``|assistant|>`` marker, or ``None`` when the
        output is empty or the marker is absent.
    """
    if not output:
        return None
    # Stringify all values of the first generation result so the marker can
    # be located regardless of the exact output key used by the pipeline.
    text = str(list(output[0].values()))
    idx = text.find('|assistant|')
    if idx == -1:
        # Bug fix: the original fell through to `return result` here with
        # `result` unassigned, raising UnboundLocalError.
        return None
    # Skip past the full '|assistant|>' marker (find located '|assistant|',
    # so adding len('|assistant|>') also consumes the trailing '>').
    return text[idx + len('|assistant|>'):]
24
+
25
  def get_result(query):
26
  pipe = pipeline("text-generation", model="OS07/Letsee", torch_dtype=torch.bfloat16, device_map="auto")
27
  prompt_template = "<|system|>\n<|end|>\n<|user|>\n{query}<|end|>\n<|assistant|>"
 
31
  result = generated_output_filtering(outputs)
32
  return result
33
 
 
 
 
 
 
 
 
 
34
 
35
  #result=generated_output_filtering(outputs)
36
  #result