Update app.py
app.py
CHANGED
@@ -52,23 +52,20 @@ def format_prompt(message, history):
 
 def generate(prompt, history=[], temperature=0.1, max_new_tokens=10000, top_p=0.95, repetition_penalty=1.0):
     global total_tokens_used
-    input_tokens =
-    total_tokens_used += input_tokens
-
-
-        return f"Error: Input exceeds the maximum allowed number of tokens. Total tokens used: {total_tokens_used}"
-
-    formatted_prompt = format_prompt(prompt, history)
-    output_accumulated = ""
+    input_tokens = tokenizer.encode(prompt)
+    total_tokens_used += len(input_tokens)
+    if total_tokens_used >= 32768:
+        return "Error: Input has exceeded the maximum allowed number of tokens."
     try:
-
-
-
-
-
-        return
+        response = client(text=prompt, temperature=temperature, max_tokens=max_new_tokens)
+        response_text = response.get('generated_text', '')
+        if "티커" in prompt:
+            ticker = prompt.split()[-1]
+            response_text += "\n" + fetch_ticker_info(ticker)
+        return response_text
     except Exception as e:
-        return f"
+        return f"Error occurred: {str(e)}"
+
 
 def postprocess(history):
     user_prompt = history[-1][0]
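The new guard keeps a running total of prompt tokens across calls and stops generating once 32768 tokens have been consumed. A minimal sketch of that pattern, assuming a Hugging Face tokenizer; the Space's actual tokenizer is loaded elsewhere in app.py and is not visible in this diff, so "gpt2" below is only a stand-in:

# Sketch of the cumulative token-budget check added to generate().
# Assumption: the tokenizer name is a placeholder, not the one app.py actually loads.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer
total_tokens_used = 0
TOKEN_BUDGET = 32768

def within_budget(prompt: str) -> bool:
    """Add the prompt's token count to the running total and report whether the budget still holds."""
    global total_tokens_used
    total_tokens_used += len(tokenizer.encode(prompt))
    return total_tokens_used < TOKEN_BUDGET

Note that only prompt tokens count against the budget: generated tokens are not added, and total_tokens_used is not reset anywhere in this hunk, so the limit effectively applies to the lifetime of the process.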
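The added branch checks whether the prompt contains "티커" (the Korean word for "ticker"), treats the last whitespace-separated word of the prompt as the symbol, and appends the output of fetch_ticker_info(ticker). That helper is defined elsewhere in app.py and is not part of this diff; a hypothetical sketch of what it might look like, assuming quote data comes from yfinance (the real implementation may differ):

# Hypothetical fetch_ticker_info(); the real helper in app.py is not shown in this diff.
import yfinance as yf

def fetch_ticker_info(ticker: str) -> str:
    """Return a short, human-readable summary for a stock ticker symbol."""
    try:
        info = yf.Ticker(ticker).info
        name = info.get("shortName", ticker)
        price = info.get("currentPrice", "N/A")
        currency = info.get("currency", "")
        return f"{name} ({ticker}): current price {price} {currency}".strip()
    except Exception as exc:
        return f"Could not fetch info for {ticker}: {exc}"

Because the symbol is taken from prompt.split()[-1], the ticker is expected to be the last word of the user's message.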
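A quick usage sketch of the updated generate(), assuming app.py's tokenizer, client, and fetch_ticker_info are already defined; the prompt text is purely illustrative:

# Illustrative call; the surrounding Gradio app wires generate() to the chat UI.
reply = generate("티커 AAPL", history=[], temperature=0.1, max_new_tokens=512)
print(reply)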