Update app.py
app.py CHANGED
@@ -83,14 +83,13 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def generate(prompt, history=[], temperature=0.1, max_new_tokens=25000, top_p=0.95, repetition_penalty=1.0):
+def generate(prompt, history=[], temperature=0.1, max_new_tokens=24000, top_p=0.95, repetition_penalty=1.0):
     input_tokens = len(tokenizer.encode(prompt))
     available_tokens = 32768 - input_tokens
     max_new_tokens = min(max_new_tokens, available_tokens)
 
     if available_tokens <= 0:
-
-        return
+        return "Error: 입력이 최대 허용 토큰 수를 초과합니다.", f"Used tokens: {input_tokens}"
 
     formatted_prompt = format_prompt(prompt, history)
     try:
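The bare `return` is replaced with a readable error tuple (the Korean string means "the input exceeds the maximum allowed token count") plus a token report. As a standalone illustration of the budgeting rule this hunk encodes, assuming only a tokenizer with an `encode` method; `CONTEXT_WINDOW` and `budget_new_tokens` are hypothetical names, not code from app.py:

```python
# Illustrative sketch of the token-budget rule above; names are hypothetical.
CONTEXT_WINDOW = 32768  # model context size, shared by prompt and generation

def budget_new_tokens(tokenizer, prompt: str, requested: int = 24000):
    """Return (max_new_tokens, input_tokens) for a prompt.

    max_new_tokens is clamped to the space the prompt leaves free, flooring
    at 0, which is the case the diff now reports as an error.
    """
    input_tokens = len(tokenizer.encode(prompt))  # tokens consumed by the prompt
    available = CONTEXT_WINDOW - input_tokens     # room left for generation
    return max(0, min(requested, available)), input_tokens
```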
@@ -102,9 +101,10 @@ def generate(prompt, history=[], temperature=0.1, max_new_tokens=25000, top_p=0.95, repetition_penalty=1.0):
                 output += response['generated_text']
             else:
                 output += str(response)
-
+        return output, f"Used tokens: {input_tokens + max_new_tokens}"
     except Exception as e:
-
+        return f"Error: {str(e)}", "Used tokens: 0"
+
 
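One caveat with the success path: `input_tokens + max_new_tokens` reports the generation budget, not the tokens actually produced, so a run that stops early is over-counted. If an exact figure matters, re-encoding the output gives it. A sketch under that assumption; `used_token_report` is a hypothetical helper, not part of app.py:

```python
def used_token_report(tokenizer, prompt: str, output: str) -> str:
    """Count tokens actually spent rather than the reserved ceiling."""
    input_tokens = len(tokenizer.encode(prompt))
    output_tokens = len(tokenizer.encode(output))
    return f"Used tokens: {input_tokens + output_tokens}"
```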
@@ -129,14 +129,13 @@ def update_output(result):
     output_text, used_tokens = result
     return output_text, f"Used tokens: {used_tokens}"
 
-demo = gr.Interface(
+demo = gr.ChatInterface(
     fn=generate,
-
-    outputs=[gr.Markdown(), gr.Label()],
-    examples=examples,
+    chatbot=mychatbot,
     title="AIQ 코드파일럿: OpenLLM v1.12",
-
-
+    retry_btn=None,
+    undo_btn=None,
+    examples=examples
 )
 
-demo.launch(show_api=False)
+demo.queue().launch(show_api=False)
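Switching from separate input/output widgets to `gr.ChatInterface` moves history handling into Gradio, and `queue()` lets concurrent requests wait in line for the single model worker. A minimal self-contained sketch of the new wiring under the Gradio 4.x API (the title reads roughly "AIQ Code Pilot"); `mychatbot` and `examples` are defined elsewhere in app.py, so placeholders stand in, and the echo function replaces the real model call:

```python
import gradio as gr

mychatbot = gr.Chatbot(height=450)        # placeholder for the app's Chatbot config
examples = ["Write a Python quicksort."]  # placeholder example prompts

def generate(message, history):
    # Stand-in for the real model call; ChatInterface passes (message, history).
    return f"echo: {message}"

demo = gr.ChatInterface(
    fn=generate,
    chatbot=mychatbot,
    title="AIQ 코드파일럿: OpenLLM v1.12",
    retry_btn=None,   # hide the retry button (Gradio 4.x keyword)
    undo_btn=None,    # hide the undo button
    examples=examples,
)

demo.queue().launch(show_api=False)  # serialize requests; hide the API docs page
```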