StevenChen16 commited on
Commit
dced7e4
·
verified ·
1 Parent(s): e5d9b14

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +78 -3
README.md CHANGED
@@ -1,3 +1,78 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+ This adapter must be used with the base model "princeton-nlp/Llama-3-Instruct-8B-SimPO".
5
+ Here's an example demo using Gradio:
6
+ ```python
7
+ import gradio as gr
8
+ from llamafactory.chat import ChatModel
9
+ from llamafactory.extras.misc import torch_gc
10
+ import re
11
+
12
def split_into_sentences(text):
    """Split *text* into sentences at terminal punctuation (., ?, !).

    The lookbehinds skip abbreviation-like spans (e.g. "e.g." or "A.")
    so they do not trigger a split. Returns the non-empty pieces with
    surrounding whitespace stripped.
    """
    boundary = re.compile(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?|\!)\s')
    pieces = boundary.split(text)
    return [piece.strip() for piece in pieces if piece]
16
+
17
def process_paragraph(paragraph, progress=gr.Progress()):
    """Classify each sentence of *paragraph* with the chat model.

    For every sentence: report progress, append the sentence to the shared
    chat history, stream one model response, normalize it into a category
    label (lowercase, spaces -> underscores), and record the pair.

    Returns a list of (sentence, category) tuples.

    NOTE(review): mutates the module-level `messages` history, which keeps
    growing across calls — presumably intentional; confirm.
    """
    sentences = split_into_sentences(paragraph)
    labelled = []
    count = len(sentences)
    for idx, sentence in enumerate(sentences, start=1):
        progress(idx / count)
        messages.append({"role": "user", "content": sentence})
        response = ""
        for chunk in chat_model.stream_chat(messages, temperature=0.7, top_p=0.9, top_k=50, max_new_tokens=300):
            response += chunk.strip()
        category = response.strip().lower().replace(' ', '_')
        # The original's "fair"/non-"fair" branches append the identical
        # pair, so a single append preserves behavior exactly.
        labelled.append((sentence, category))
        messages.append({"role": "assistant", "content": response})
    torch_gc()
    return labelled
35
+
36
+
37
# Build the chat model: SimPO base model plus the compliance-review LoRA adapter.
args = dict(
    model_name_or_path="princeton-nlp/Llama-3-Instruct-8B-SimPO",  # quantized Llama-3-8B-Instruct base model
    adapter_name_or_path="StevenChen16/llama3-8b-compliance-review-adapter",  # load the saved LoRA adapter
    template="llama3",  # same template as used during training
    finetuning_type="lora",  # same fine-tuning type as used during training
    quantization_bit=8,  # load the model with 8-bit quantization
    use_unsloth=True,  # use UnslothAI's LoRA optimization to speed up generation
)
chat_model = ChatModel(args)
messages = []  # shared chat history; mutated by process_paragraph on every call

# Map each clause category (the model's normalized response) to a highlight color.
label_to_color = {
    "fair": "green",
    "limitation_of_liability": "red",
    "unilateral_termination": "orange",
    "unilateral_change": "yellow",
    "content_removal": "purple",
    "contract_by_using": "blue",
    "choice_of_law": "cyan",
    "jurisdiction": "magenta",
    "arbitration": "brown",
}

# Gradio UI: paragraph input on the left, per-sentence highlighted output on the right.
with gr.Blocks() as demo:

    with gr.Row(equal_height=True):
        with gr.Column():
            input_text = gr.Textbox(label="Input Paragraph", lines=10, placeholder="Enter the paragraph here...")
            btn = gr.Button("Process")
        with gr.Column():
            output = gr.HighlightedText(label="Processed Paragraph", color_map=label_to_color)
    progress = gr.Progress()

    def on_click(paragraph):
        # Run the classifier; (sentence, category) pairs feed HighlightedText directly.
        results = process_paragraph(paragraph, progress=progress)
        return results

    btn.click(on_click, inputs=input_text, outputs=[output])

demo.launch(share=True)
78
+ ```