Update app.py
app.py
CHANGED
@@ -87,11 +87,13 @@ def generate(prompt, history=[], temperature=0.1, max_new_tokens=25000, top_p=0.
     input_tokens = len(tokenizer.encode(prompt))
     available_tokens = 32768 - input_tokens
     max_new_tokens = min(max_new_tokens, available_tokens)
-
+
     if available_tokens <= 0:
-
+        yield "Error: 입력이 최대 허용 토큰 수를 초과합니다."
+        return
 
     formatted_prompt = format_prompt(prompt, history)
+
     try:
         stream = client.text_generation(formatted_prompt, temperature=temperature, max_new_tokens=max_new_tokens,
                                         top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42, stream=True)
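The guard added in this hunk clamps max_new_tokens to whatever remains of the 32768-token context budget and, because generate is a streaming generator, reports overflow by yielding an error message ("the input exceeds the maximum allowed tokens") and then returning. A minimal, self-contained sketch of that pattern; count_tokens is a hypothetical stand-in for the len(tokenizer.encode(...)) call used in app.py:

CONTEXT_WINDOW = 32768  # same context budget as app.py

def count_tokens(text):
    # Hypothetical stand-in for len(tokenizer.encode(text)) in app.py.
    return len(text.split())

def generate(prompt, max_new_tokens=25000):
    available_tokens = CONTEXT_WINDOW - count_tokens(prompt)
    max_new_tokens = min(max_new_tokens, available_tokens)
    if available_tokens <= 0:
        # A generator cannot simply return an error string to Gradio, so the
        # message is yielded as the only output and the generator then stops.
        yield "Error: 입력이 최대 허용 토큰 수를 초과합니다."
        return
    yield f"(would stream up to {max_new_tokens} new tokens here)"

# Usage: list(generate("hello")) -> either streamed chunks or the error message.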
@@ -100,20 +102,10 @@ def generate(prompt, history=[], temperature=0.1, max_new_tokens=25000, top_p=0.
             if isinstance(response, dict) and 'generated_text' in response:
                 output += response['generated_text']
             else:
-                output += str(response) #
-
+                output += str(response) # 예외 처리를 개선하여 모든 응답을 문자열로 처리
+            yield output
     except Exception as e:
-
-
-demo = gr.Interface(
-    fn=generate,
-    inputs=[gr.Textbox(label="질문을 입력하세요", placeholder="여기에 질문을 입력하세요...", lines=2), gr.JSON(label="History", value=[])],
-    outputs=[gr.Markdown(), gr.Label()],
-    title="AIQ 코드파일럿: OpenLLM v1.12",
-    description="AIQ Codepilot과 상호작용해 보세요."
-)
-
-demo.launch(show_api=False)
+        yield f"Error: {str(e)} # 에러 로깅을 강화하여 문제를 좀 더 명확히 식별"
 
 
 mychatbot = gr.Chatbot(
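This hunk reworks the streaming loop so the accumulated output is yielded back to Gradio as each chunk arrives, and any failure surfaces as a yielded "Error: ..." string rather than an uncaught exception; it also drops the old standalone gr.Interface / demo.launch block. Below is a minimal sketch of the loop, under the assumption that client behaves like huggingface_hub.InferenceClient.text_generation(..., stream=True); FakeStreamClient exists only to make the example runnable:

class FakeStreamClient:
    # Stand-in for the inference client used in app.py; emits fixed chunks.
    def text_generation(self, prompt, stream=True, **kwargs):
        yield from ["partial text ", {"generated_text": "and the final text"}]

client = FakeStreamClient()

def stream_generate(formatted_prompt, **gen_kwargs):
    output = ""
    try:
        # Chunks may arrive as plain text or as dicts carrying 'generated_text',
        # so both shapes are folded into the running output, as in the diff.
        for response in client.text_generation(formatted_prompt, stream=True, **gen_kwargs):
            if isinstance(response, dict) and 'generated_text' in response:
                output += response['generated_text']
            else:
                output += str(response)
            yield output  # each yield refreshes the partial answer in the UI
    except Exception as e:
        # Report the failure as a chat message rather than crashing the app.
        yield f"Error: {str(e)}"

# Usage: for chunk in stream_generate("Hello"): print(chunk)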
@@ -133,9 +125,6 @@ examples = [
     ["Huggingface와 Gradio를 사용하는 방법에 대해 물어보세요.", []]
 ]
 
-def update_output(result):
-    output_text, used_tokens = result
-    return output_text, f"Used tokens: {used_tokens}"
 
 demo = gr.ChatInterface(
     fn=generate,
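The removed update_output helper split a (text, token count) pair across the old gr.Interface outputs ([gr.Markdown(), gr.Label()]); gr.ChatInterface expects fn to produce a single string per turn, so nothing calls it anymore. A small sketch of the contract difference (chat_fn is illustrative):

# Old contract: one return value per output component of gr.Interface.
def update_output(result):
    output_text, used_tokens = result
    return output_text, f"Used tokens: {used_tokens}"

# gr.ChatInterface contract: fn(message, history) produces a single string
# per turn; a generator may yield it incrementally for streaming.
def chat_fn(message, history):
    yield f"answer for: {message}"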
@@ -146,4 +135,4 @@ demo = gr.ChatInterface(
     examples=examples
 )
 
-demo.queue().launch(show_api=False)
+demo.queue().launch(show_api=False)
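With generate now a generator, the app keeps demo.queue() in front of launch(); Gradio runs streaming (generator) handlers through its queue, and older releases refuse to stream without it, so this stays the single entry point after the gr.Interface block was dropped above. A self-contained sketch of the resulting wiring, with echo_stream standing in for generate and a bare gr.Chatbot standing in for the customised mychatbot defined in app.py:

import gradio as gr

def echo_stream(message, history):
    # Stand-in for generate(): each yield replaces the assistant's partial reply.
    partial = ""
    for word in f"You said: {message}".split():
        partial += word + " "
        yield partial

demo = gr.ChatInterface(
    fn=echo_stream,
    chatbot=gr.Chatbot(),  # stand-in for the customised mychatbot in app.py
    examples=["Huggingface와 Gradio를 사용하는 방법에 대해 물어보세요."],
)

demo.queue().launch(show_api=False)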