Dooratre commited on
Commit
8210490
·
verified ·
1 Parent(s): ab550cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +128 -46
app.py CHANGED
@@ -1,60 +1,142 @@
1
- import os
 
 
 
2
  import requests
3
  import json
4
- from io import BytesIO
5
-
6
- from flask import Flask, jsonify, render_template, request, send_file
7
-
8
- from modules.inference import infer_t5
9
- from modules.dataset import query_emotion
10
-
11
- # https://huggingface.co/settings/tokens
12
- # https://huggingface.co/spaces/{username}/{space}/settings
13
- API_TOKEN = os.getenv("BIG_GAN_TOKEN")
14
 
15
  app = Flask(__name__)
16
 
 
 
 
17
 
18
- @app.route("/")
19
- def index():
20
- return render_template("index.html")
21
-
22
-
23
- @app.route("/infer_biggan")
24
- def biggan():
25
- input = request.args.get("input")
26
-
27
- output = requests.request(
28
- "POST",
29
- "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128",
30
- headers={"Authorization": f"Bearer {API_TOKEN}"},
31
- data=json.dumps(input),
32
- )
33
 
34
- return send_file(BytesIO(output.content), mimetype="image/png")
 
 
35
 
 
 
 
 
36
 
37
- @app.route("/infer_t5")
38
- def t5():
39
- input = request.args.get("input")
 
 
 
40
 
41
- output = infer_t5(input)
42
-
43
- return jsonify({"output": output})
44
-
45
-
46
- @app.route("/query_emotion")
47
- def emotion():
48
- start = request.args.get("start")
49
- end = request.args.get("end")
50
-
51
- print(start)
52
- print(end)
53
-
54
- output = query_emotion(int(start), int(end))
55
-
56
- return jsonify({"output": output})
57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
59
  if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  app.run(host="0.0.0.0", port=7860)
 
1
+ from flask import Flask, render_template, request, send_from_directory, url_for
2
+ from datetime import datetime
3
+ from langchain_community.llms import HuggingFaceHub
4
+ from langchain.prompts import PromptTemplate
5
  import requests
6
  import json
7
+ import nltk
8
+ from textblob import TextBlob
9
+ from nltk.tokenize import word_tokenize
10
+ from nltk.stem import PorterStemmer
11
+ from nltk.stem import WordNetLemmatizer
 
 
 
 
 
12
 
13
app = Flask(__name__)

# Chatbot "system" configuration/persona, injected into the prompt as {json_data}.
with open('ai_chatbot_data.json', 'r') as file:
    json_data = json.load(file)

# Historical Bitcoin price rows (tab-separated; /add_data appends to this file).
with open('info.txt', 'r') as file:
    database_content = file.read()
# Alias of the same text blob; the prompt variable is named database_tag.
database_tag = database_content

# Prompt sent to the LLM. The trailing literal "Response:" marker is what
# /submit later splits on to separate the model's reply from the echoed prompt.
template = "Message: {message}\n\nConversation History: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBitcoin history from 1-jan-2024 to today: {database_tag}\n\nYour system: {json_data}.\n\nResponse:"
prompt = PromptTemplate(template=template, input_variables=["message","history", "date_time", "bitcoin_price", "database_tag", "json_data"])
# Process-wide chat log shared by ALL clients — there is no per-session state.
conversation_history = []
26
 
27
def get_bitcoin_price():
    """Fetch the current BTC/USD rate from the CoinDesk API.

    Returns:
        tuple[str, str]: ``(price, timestamp)``. ``price`` is CoinDesk's
        formatted USD rate string, or the literal sentinel
        ``'Error fetching data'`` when anything goes wrong (non-200 status,
        network error, invalid JSON, or unexpected payload shape).
        ``timestamp`` is local time as ``YYYY-MM-DD HH:MM:SS``.
    """
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    url = 'https://api.coindesk.com/v1/bpi/currentprice.json'
    try:
        # A timeout stops a hung upstream request from blocking the Flask worker.
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            data = response.json()
            return data['bpi']['USD']['rate'], current_time
    except (requests.RequestException, ValueError, KeyError):
        # Network failure, bad JSON, or schema change: fall through to the same
        # sentinel the caller already expects for a bad status code.
        pass
    return 'Error fetching data', current_time
38
 
39
@app.route('/assets/<path:path>')
def send_static(path):
    """Serve a static file from the local ``assets`` directory."""
    asset_dir = 'assets'
    return send_from_directory(asset_dir, path)
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
@app.route('/')
def index():
    """Render the chat page, pre-populated with the shared conversation log."""
    global conversation_history
    history = conversation_history
    return render_template('index.html', conversation=history)
47
+
48
@app.route('/submit', methods=['POST'])
def submit():
    """Handle one chat turn: build the prompt, query the LLM, return the reply.

    Expects JSON ``{"user_input": "..."}``. Returns the bot reply as text with
    newlines converted to ``<br>``, and appends both sides of the exchange to
    the shared ``conversation_history``.
    """
    user_input = request.json.get('user_input')
    # Guard against a missing/empty field — the original would crash with a
    # TypeError (HTTP 500) on None input.
    if not user_input:
        return 'No input provided', 400

    bitcoin_price, current_time = get_bitcoin_price()

    conversation_history.append("User: " + user_input)

    # NOTE(review): the original also computed NLTK tokens/stems/lemmas and a
    # TextBlob sentiment here, but none of those values were ever used (the
    # prompt template ignores them), so that dead work has been removed.
    model_input = prompt.format(
        message=user_input,
        history="<br>".join(conversation_history),
        database_tag=database_content,
        date_time=current_time,
        bitcoin_price=bitcoin_price,
        json_data=json_data,
    )
    response = llm(model_input)

    # The hub endpoint echoes the prompt, which ends with "Response:"; take the
    # text after the first marker. Guard against a completion that lacks the
    # marker — the original raised IndexError (HTTP 500) in that case.
    parts = response.split('Response:')
    bot_response = parts[1] if len(parts) > 1 else parts[0]
    bot_response = bot_response.strip().replace('\n', '<br>')
    conversation_history.append("Bot: " + bot_response)

    return bot_response
81
+ ##############################################################################################
82
@app.route('/add_data', methods=['GET', 'POST'])
def add_data():
    """Admin page: on POST, append one tab-separated OHLCV row to info.txt."""
    if request.method == 'POST':
        field_names = (
            'date', 'open_price', 'high_price', 'low_price',
            'close_price', 'adj_close', 'volume',
        )
        # Missing keys raise Flask's BadRequestKeyError (HTTP 400), same as the
        # original per-field request.form[...] lookups.
        row = [request.form[name] for name in field_names]
        with open('info.txt', 'a') as file:
            file.write('\t'.join(row) + '\n')
    return render_template('admin.html')
99
+ ################################################################################################################################
100
@app.route('/clear_history')
def clear_history():
    """Wipe the process-wide chat log and confirm to the caller."""
    global conversation_history
    conversation_history = []
    return 'Conversation history cleared'
105
+
106
# SECURITY NOTE(review): reads the tail of a HuggingFace API token from a local
# file; the __main__ block below concatenates it with the "hf" prefix —
# presumably to dodge secret scanners. Prefer an environment variable and
# never commit token material. Also note this read happens at import time and
# will raise FileNotFoundError if i.txt is absent.
with open('i.txt', 'r') as file:
    data = file.read()
108
 
109
if __name__ == "__main__":
    # Mixtral-8x7B instruct, served via the HuggingFace Inference API.
    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    # Token = "hf" prefix + contents of i.txt (read at module level above).
    huggingfacehub_api_token = "hf" + data

    # NOTE(review): `llm` is created only when this file runs as a script, yet
    # /submit references it as a global. Under a WSGI server (gunicorn etc.)
    # that imports the module without executing this block, /submit will fail
    # with NameError — confirm the deployment always runs `python app.py`.
    llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
                         repo_id=repo_id,
                         model_kwargs={
                             # Standard text-generation parameters.
                             "temperature": 0.5,
                             "max_new_tokens": 512,
                             "top_p": 0.3,
                             "repetition_penalty": 1.2,
                             "num_beams": 3,
                             "length_penalty": 1.5,
                             "no_repeat_ngram_size": 2,
                             "early_stopping": True,
                             "num_return_sequences": 1,
                             "use_cache": True,
                             # NOTE(review): everything below does not look like
                             # a recognized generation parameter for this
                             # endpoint and is most likely ignored (or rejected)
                             # by the API — verify against the HF Inference API
                             # docs before relying on any of it.
                             "task": "predictions",
                             "data_source": "financial_markets",
                             "historical_data_fetch": True,
                             "real-time_data_integration": True,
                             "feature_engineering": ["technical_indicators", "sentiment_analysis", "volume_analysis"],
                             "machine_learning_models": ["LSTM", "Random Forest", "ARIMA", "Gradient Boosting"],
                             "prediction_horizon": "short-term",
                             "evaluation_metrics": ["accuracy", "MSE", "MAE", "RMSE"],
                             "model_fine-tuning": True,
                             "interpretability_explanation": True,
                             "ensemble_methods": ["voting", "stacking"],
                             "hyperparameter_optimization": True,
                             "cross-validation": True,
                             "online_learning": True,
                         }
                         )
    # Binds on all interfaces, port 7860 (HuggingFace Spaces convention).
    app.run(host="0.0.0.0", port=7860)