Smiley0707 committed on
Commit
2f61516
1 Parent(s): 4b68816

Create app.py

Files changed (1)
  1. app.py +115 -0
app.py ADDED
@@ -0,0 +1,115 @@
+ import os
+ import time
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
+ import gradio as gr
+ from threading import Thread
+
+ MODEL_LIST = ["meta-llama/Meta-Llama-3.1-405B-Instruct"]
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+ MODEL = os.environ.get("MODEL_ID")
+
+ print(MODEL)
+
+ TITLE = "<h1><center>Meta-Llama3.1-405B</center></h1>"
+
+ PLACEHOLDER = """
+ <center>
+ <p>Hi! How can I help you today?</p>
+ </center>
+ """
+
+
+ CSS = """
+ .duplicate-button {
+     margin: auto !important;
+     color: white !important;
+     background: black !important;
+     border-radius: 100vh !important;
+ }
+ h3 {
+     text-align: center;
+ }
+ """
+
+ device = "cuda"  # for GPU usage or "cpu" for CPU usage
+
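+ # Quantize the weights to 4-bit NF4 (double quantization, bfloat16 compute) to cut the
+ # memory footprint; device_map="auto" then shards the layers across the available GPUs.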
+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4")
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     quantization_config=quantization_config)
+
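+ # @spaces.GPU requests a GPU for the duration of each call when the app runs on
+ # Hugging Face ZeroGPU Spaces.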
+ @spaces.GPU()
+ def stream_chat(
+     message: str,
+     history: list,
+     system_prompt: str = "You are a helpful assistant.",  # generic default; the UI below does not expose a system prompt field
+     temperature: float = 0.8,
+     max_new_tokens: int = 1024,
+     top_p: float = 1.0,
+     top_k: int = 20,
+     penalty: float = 1.2,
+ ):
+     print(f'message: {message}')
+     print(f'history: {history}')
+
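+     # Rebuild the full conversation (system prompt, prior turns, new message) in the
+     # role/content format that apply_chat_template expects.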
+     conversation = [
+         {"role": "system", "content": system_prompt}
+     ]
+     for prompt, answer in history:
+         conversation.extend([
+             {"role": "user", "content": prompt},
+             {"role": "assistant", "content": answer},
+         ])
+
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
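+     # The streamer skips the prompt and special tokens, and times out if no new token
+     # arrives within 60 seconds.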
+     streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
+
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         max_new_tokens=max_new_tokens,
+         do_sample=False if temperature == 0 else True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         repetition_penalty=penalty,
+         eos_token_id=[128001, 128008, 128009],  # Llama 3.1 end-of-text / end-of-turn token ids
+         streamer=streamer,
+     )
+
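+     # model.generate runs in a background thread; the streamer yields decoded text chunks
+     # as they are produced, so partial output can be streamed to the UI.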
+     with torch.no_grad():
+         thread = Thread(target=model.generate, kwargs=generate_kwargs)
+         thread.start()
+
+         buffer = ""
+         for new_text in streamer:
+             buffer += new_text
+             yield buffer
+
+
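+ # Gradio UI: a standalone Chatbot component rendered inside Blocks and driven by ChatInterface.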
+ chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
+
+ with gr.Blocks(css=CSS, theme="soft") as demo:
+     gr.HTML(TITLE)
+     gr.ChatInterface(
+         fn=stream_chat,
+         chatbot=chatbot,
+         fill_height=True,
+         cache_examples=False,
+     )
+
+
+ if __name__ == "__main__":
+     demo.launch()