littlebird13 committed
Commit
a826f18
1 Parent(s): ce3d615

Create app.py

Files changed (1)
  1. app.py +132 -0
app.py ADDED
@@ -0,0 +1,132 @@
+ import spaces
+ import os
+ 
+ # Install runtime dependencies at startup (a common Hugging Face Space shortcut).
+ os.system('pip install transformers -U')
+ os.system('pip install modelscope -U')
+ os.system('pip install accelerate')
+ 
+ from threading import Thread
+ from typing import Iterator
+ 
+ import gradio as gr
+ import torch
+ from modelscope import AutoModelForCausalLM, AutoTokenizer
+ from transformers import TextIteratorStreamer
+ 
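+ # Generation-length limits; MAX_INPUT_TOKEN_LENGTH can be overridden via an environment variable.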
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+ 
+ 
+ DESCRIPTION = ""
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+ 
+ 
+ if torch.cuda.is_available():
+     model_id = "qwen/Qwen1.5-1.8B-Chat"
+     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     # The system prompt comes from the UI textbox, so skip the tokenizer's built-in default.
+     tokenizer.use_default_system_prompt = False
+ 
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.2,
+ ) -> Iterator[str]:
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+ 
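+     # Render the conversation with the model's chat template, then tokenize the resulting prompt.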
+     prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
+     input_ids = tokenizer([prompt], return_tensors="pt").input_ids
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         # Keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens if the conversation grows too long.
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+ 
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         repetition_penalty=repetition_penalty,
+     )
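+     # Run generation on a background thread so decoded text can stream to the UI as it is produced.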
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+ 
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+ 
+ 
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.6,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["你好!你是谁?"],  # "Hello! Who are you?"
+         ["请简单介绍一下大语言模型?"],  # "Please give a brief introduction to large language models."
+         ["请讲一个小人物成功的故事。"],  # "Tell a story about an ordinary person's success."
+         ["浙江的省会在哪里?"],  # "What is the capital of Zhejiang province?"
+         ["写一篇100字的文章,题目是'人工智能开源的优势'"],  # "Write a 100-character essay titled 'The advantages of open-source AI'."
+     ],
+ )
+ 
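+ # Page layout: logo, title, model description, and the chat interface.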
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown("""<p align="center"><img src="https://modelscope.cn/api/v1/models/qwen/Qwen-VL-Chat/repo?Revision=master&FilePath=assets/logo.jpg&View=true" style="height: 80px"/></p>""")
+     gr.Markdown("""<center><font size=8>Qwen1.5-1.8B-Chat Bot👾</center>""")
+     gr.Markdown("""<center><font size=4>Qwen1.5-1.8B is the 1.8-billion-parameter chat model in the Qwen1.5 (Tongyi Qianwen) series of large language models developed by Alibaba Cloud.</center>""")
+     chat_interface.render()
+ 
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()