Update app.py
app.py CHANGED
@@ -62,7 +62,6 @@ def generate(prompt, history=[], temperature=0.1, max_new_tokens=10000, top_p=0.
     input_tokens = len(tokenizer.encode(prompt))
     total_tokens_used += input_tokens
     available_tokens = 32768 - total_tokens_used
-
     if available_tokens <= 0:
         yield f"Error: Input exceeds the maximum number of allowed tokens. Total tokens used: {total_tokens_used}"
         return
@@ -70,8 +69,16 @@ def generate(prompt, history=[], temperature=0.1, max_new_tokens=10000, top_p=0.
     formatted_prompt = format_prompt(prompt, history)
     output_accumulated = ""
     try:
-        stream = client.text_generation(
-            formatted_prompt, temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42, stream=True)
+        stream = client.text_generation(
+            formatted_prompt,
+            temperature=temperature,
+            max_new_tokens=min(max_new_tokens, available_tokens),
+            top_p=top_p,
+            repetition_penalty=repetition_penalty,
+            do_sample=True,
+            seed=42,
+            stream=True
+        )
         for response in stream:
             output_part = response['generated_text'] if 'generated_text' in response else str(response)
             output_accumulated += output_part
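The functional change in this hunk is `max_new_tokens=min(max_new_tokens, available_tokens)`, which keeps each request inside the remaining context budget. For reference, a minimal sketch of streamed generation with `huggingface_hub`'s `InferenceClient`, which is what `client` appears to be here (model id, prompt, and budget numbers are illustrative):

```python
from huggingface_hub import InferenceClient

# Illustrative model id; app.py's actual client configuration is assumed.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

max_new_tokens, available_tokens = 10000, 4096
stream = client.text_generation(
    "Explain token budgeting in one paragraph.",
    max_new_tokens=min(max_new_tokens, available_tokens),  # clamp to the budget
    stream=True,
)
for chunk in stream:  # with stream=True and details=False, chunks are plain strings
    print(chunk, end="")
```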
@@ -96,7 +103,6 @@ def process_financial_data(ticker):
     valid, message = validate_ticker(ticker)
     if not valid:
         return f"Error: {message} - please check the stock ticker '{ticker}'."
-
     try:
         stock = yf.Ticker(ticker)
         real_time_price = stock.history(period="1d")
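For context on the `try` block: `yf.Ticker(...).history(period="1d")` returns a pandas DataFrame covering the most recent trading session, and it comes back empty for unknown symbols, so checking before indexing is worthwhile. A minimal sketch (the symbol is illustrative):

```python
import yfinance as yf

stock = yf.Ticker("AAPL")  # illustrative symbol
real_time_price = stock.history(period="1d")  # DataFrame with Open/High/Low/Close/Volume

if real_time_price.empty:
    print("No data returned; check the ticker symbol.")
else:
    print(f"Latest close: {real_time_price['Close'].iloc[-1]:.2f}")
```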
@@ -119,20 +125,20 @@ mychatbot = gr.Chatbot(
     likeable=True,
 )
 
-
 examples = [
-    ["Always answer in Korean.", []],  # history value supplied as an empty list
+    ["Always answer in Korean.", []],  # history value supplied as an empty list
     ["Print the analysis report again.", []],
     ["Tell me recommended stocks.", []],
     ["Predict the investment outlook for that stock.", []]
 ]
 
-
 css = """
 h1 {
     font-size: 14px; /* set the title font size small */
 }
-footer {visibility: hidden;}
+footer {
+    visibility: hidden;
+}
 """
 
 demo = gr.ChatInterface(

@@ -142,7 +148,7 @@ demo = gr.ChatInterface(
     retry_btn=None,
     undo_btn=None,
     css=css,
-    examples=examples
+    examples=examples
 )
 
 demo.queue().launch(show_api=False)
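app.py passes each example as a `[prompt, history]` pair to match `generate`'s two-argument signature, as the inline comment notes; with no `additional_inputs`, plain strings are the documented form for `gr.ChatInterface` examples. A minimal sketch of the same wiring under Gradio 4.x (the placeholder `generate` body is ours):

```python
import gradio as gr

# Placeholder chat fn; app.py's generate() streams model output instead.
def generate(prompt, history):
    yield f"echo: {prompt}"

demo = gr.ChatInterface(
    fn=generate,
    examples=["Always answer in Korean.", "Tell me recommended stocks."],
    css="footer {visibility: hidden;}",  # same rule the commit reformats
)
demo.queue().launch(show_api=False)
```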