Update app.py
app.py
CHANGED
@@ -3,9 +3,9 @@ import gradio as gr
 from transformers import GPT2Tokenizer
 import yfinance as yf
 import pandas as pd
+import numpy as np
 import matplotlib.pyplot as plt

-
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

@@ -43,64 +43,75 @@ The user provided the additional info about how they would like you to respond:

 total_tokens_used = 0

+def format_prompt(message, history):
+    prompt = "<s>[SYSTEM] {} [/SYSTEM]".format(system_instruction)
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]{bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
 def get_stock_data(ticker):
     stock = yf.Ticker(ticker)
-    hist = stock.history(period="6mo")  # the past 6 months
+    hist = stock.history(period="6mo")  # Fetch data for the past 6 months
     return hist

-def apply_technical_indicators(df):
-    df['SMA'] = df['Close'].rolling(window=20).mean()
-    return df

-def plot_technical_indicators(df, ticker):
-    plt.figure(figsize=(10, 5))
-    plt.plot(df['Close'], label='Close Price')
-    plt.plot(df['SMA'], label='20-day SMA')
-    plt.title(f'Technical Indicators for {ticker}')
-    plt.legend()
-    plt.savefig('/mnt/data/Technical_Indicators.png')
-    plt.close()
-    return '/mnt/data/Technical_Indicators.png'

-# Function for creating the Gradio interface
 def generate(prompt, history=[], temperature=0.1, max_new_tokens=10000, top_p=0.95, repetition_penalty=1.0):
+    global total_tokens_used
     input_tokens = len(tokenizer.encode(prompt))
-
-
+    total_tokens_used += input_tokens
+    available_tokens = 32768 - total_tokens_used
+    if available_tokens <= 0:
+        yield f"Error: Input exceeds the maximum allowed number of tokens. Total tokens used: {total_tokens_used}"
+        return

+    formatted_prompt = format_prompt(prompt, history)
+    output_accumulated = ""
     try:
         ticker = prompt.upper()
         stock_data = get_stock_data(ticker)
         if not stock_data.empty:
-
-            image_path = plot_technical_indicators(
-
+            enhanced_data = apply_technical_indicators(stock_data)
+            image_path = plot_technical_indicators(enhanced_data)
+            yield f"Technical analysis for {ticker} completed. See the chart here: {image_path}\n\n---\nTotal tokens used: {total_tokens_used}"
         else:
-
+            yield f"No data available for {ticker}. Please check the ticker and try again."
     except Exception as e:
-
+        yield f"Error: {str(e)}\nTotal tokens used: {total_tokens_used}"
+
+mychatbot = gr.Chatbot(
+    avatar_images=["./user.png", "./botm.png"],
+    bubble_full_width=False,
+    show_label=False,
+    show_copy_button=True,
+    likeable=True,
+)

-# Gradio interface setup
 examples = [
-    ["
-    ["
-    ["
-    ["
-    ["TSLA", []]
+    ["Always answer in Korean.", []],
+    ["Recommend good stocks (tickers).", []],
+    ["Give a summary conclusion.", []],
+    ["Analyze my portfolio.", []]
 ]

 css = """
-h1 {
-
+h1 {
+    font-size: 14px;
+}
+footer {
+    visibility: hidden;
+}
 """

-demo = gr.
+demo = gr.ChatInterface(
     fn=generate,
-
-
-
+    chatbot=mychatbot,
+    title="Global Asset Analysis and Prediction LLM: BloombAI",
+    retry_btn=None,
+    undo_btn=None,
     css=css,
-
+    examples=examples
 )

-demo.launch()
+demo.queue().launch(show_api=False)
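
Note: the updated generate() still calls apply_technical_indicators() and plot_technical_indicators(), but this commit removes their old definitions without showing replacements, and the new call passes a single argument while the removed plot_technical_indicators took (df, ticker). Below is a minimal sketch of compatible helpers reconstructed from the removed code; the one-argument signature, fixed title, and save path are assumptions, not part of the commit.

# Hypothetical helpers reconstructed from the removed definitions; not part of this commit.
def apply_technical_indicators(df):
    df['SMA'] = df['Close'].rolling(window=20).mean()  # 20-day simple moving average
    return df

def plot_technical_indicators(df):
    # Plot close price and SMA, save the chart, and return the image path.
    plt.figure(figsize=(10, 5))
    plt.plot(df['Close'], label='Close Price')
    plt.plot(df['SMA'], label='20-day SMA')
    plt.title('Technical Indicators')
    plt.legend()
    plt.savefig('Technical_Indicators.png')  # assumed path; the removed code wrote to /mnt/data/
    plt.close()
    return 'Technical_Indicators.png'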
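
For reference, format_prompt() builds a Mixtral-style instruction prompt from system_instruction (presumably defined in the unchanged part of app.py) and the chat history, but this hunk never shows the resulting formatted_prompt being sent to the InferenceClient. A hedged sketch of the standard huggingface_hub streaming call it could feed, reusing the sampling parameters from generate(); the example prompt and token limit are placeholders, not code from this commit.

# Hypothetical usage sketch; assumes client, system_instruction, and format_prompt from app.py.
output_accumulated = ""
formatted_prompt = format_prompt("Analyze AAPL", history=[])
for chunk in client.text_generation(
    formatted_prompt,
    temperature=0.1,
    max_new_tokens=512,
    top_p=0.95,
    repetition_penalty=1.0,
    stream=True,  # yields generated text incrementally
):
    output_accumulated += chunk
print(output_accumulated)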