File size: 6,065 Bytes
8210490
 
 
 
db6e2f8
 
8210490
 
 
 
 
db6e2f8
 
 
8210490
 
 
db6e2f8
8210490
 
 
db6e2f8
8210490
 
 
db6e2f8
8210490
 
 
 
db6e2f8
8210490
 
 
 
 
 
db6e2f8
8210490
 
 
db6e2f8
8210490
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
db6e2f8
 
8210490
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
db6e2f8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
from flask import Flask, render_template, request, send_from_directory, url_for
from datetime import datetime
from langchain_community.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
import requests
import json
import nltk
from textblob import TextBlob
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer

app = Flask(__name__)

# System persona/configuration for the bot, injected verbatim into the
# prompt as the {json_data} placeholder.
with open('ai_chatbot_data.json', 'r') as file:
    json_data = json.load(file)

# Historical Bitcoin price rows (tab-separated lines); the /add_data
# route appends new rows to this same file at runtime. Note: this copy
# is read once at import time, so rows added later are not reflected
# until restart.
with open('info.txt', 'r') as file:
    database_content = file.read()
database_tag = database_content  # alias matching the template's {database_tag} variable

# Prompt skeleton; the placeholders are filled per-request in submit().
template = "Message: {message}\n\nConversation History: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBitcoin history from 1-jan-2024 to today: {database_tag}\n\nYour system: {json_data}.\n\nResponse:"
prompt = PromptTemplate(template=template, input_variables=["message","history", "date_time", "bitcoin_price", "database_tag", "json_data"])
# In-memory chat log shared by all requests. NOTE(review): shared mutable
# global — concurrent requests may interleave appends; fine for a
# single-user dev server, confirm before deploying multi-user.
conversation_history = []

def get_bitcoin_price():
    """Fetch the current Bitcoin price in USD from the CoinDesk API.

    Returns:
        tuple[str, str]: ``(price, timestamp)`` where ``price`` is the
        USD rate string from the API, or the sentinel
        ``'Error fetching data'`` on any failure, and ``timestamp`` is
        the local time formatted ``"%Y-%m-%d %H:%M:%S"``.
    """
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    url = 'https://api.coindesk.com/v1/bpi/currentprice.json'
    try:
        # Timeout prevents the request handler from hanging indefinitely
        # when the API is slow or unreachable (previously no timeout).
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            data = response.json()
            bitcoin_price = data['bpi']['USD']['rate']
            return bitcoin_price, current_time
        return 'Error fetching data', current_time
    except (requests.RequestException, ValueError, KeyError):
        # Network failure, non-JSON body, or unexpected payload shape:
        # degrade to the same sentinel the caller already handles instead
        # of propagating an exception into the Flask route.
        return 'Error fetching data', current_time

@app.route('/assets/<path:path>')
def send_static(path):
    """Serve a static file from the local ``assets`` directory."""
    asset_dir = 'assets'
    return send_from_directory(asset_dir, path)

@app.route('/')
def index():
    """Render the chat page with the conversation accumulated so far.

    Reading the module-level list needs no ``global`` declaration.
    """
    return render_template('index.html', conversation=conversation_history)

@app.route('/submit', methods=['POST'])
def submit():
    """Handle a chat POST: build the prompt, query the LLM, and return
    the bot's reply as an HTML-ready string (newlines become ``<br>``).

    Expects a JSON body with a ``user_input`` key. Side effect: appends
    both the user message and the bot reply to the module-level
    ``conversation_history``.
    """
    user_input = request.json.get('user_input')

    bitcoin_price, current_time = get_bitcoin_price()

    conversation_history.append("User: " + user_input)

    # Supply exactly the variables declared in the PromptTemplate. The
    # previous code also passed token/stem/lemma lists the template never
    # referenced — LangChain's strict formatter rejects unused kwargs —
    # and computed NLTK/TextBlob results that were never used at all;
    # that dead work has been removed.
    model_input = prompt.format(
        message=user_input,
        history="<br>".join(conversation_history),
        database_tag=database_content,
        date_time=current_time,
        bitcoin_price=bitcoin_price,
        json_data=json_data,
    )
    response = llm(model_input)

    # The completion may echo the prompt; keep only the text after the
    # first "Response:" marker. Fall back to the whole completion when
    # the marker is absent (previously an unguarded [1] raised
    # IndexError in that case).
    parts = response.split('Response:', 1)
    bot_response = parts[1] if len(parts) > 1 else response
    bot_response = bot_response.strip().replace('\n', '<br>')
    conversation_history.append("Bot: " + bot_response)

    return bot_response
##############################################################################################
@app.route('/add_data', methods=['GET', 'POST'])
def add_data():
    """Admin page: on POST, append one OHLCV row to ``info.txt``.

    The seven form fields are written tab-separated in a fixed order,
    one row per line. GET simply renders the admin form.
    """
    if request.method == 'POST':
        # Fixed column order matching the info.txt row layout.
        field_names = (
            'date',
            'open_price',
            'high_price',
            'low_price',
            'close_price',
            'adj_close',
            'volume',
        )
        new_data = [request.form[name] for name in field_names]

        with open('info.txt', 'a') as file:
            file.write('\t'.join(new_data) + '\n')

    return render_template('admin.html')
################################################################################################################################
@app.route('/clear_history')
def clear_history():
    """Reset the in-memory chat log and return a plain-text confirmation."""
    global conversation_history
    # Rebind (rather than mutate) the module-level list; routes read it
    # through the module global, so they see the fresh empty list.
    conversation_history = []
    return 'Conversation history cleared'

# Secret read at import time: i.txt holds the body of the Hugging Face
# API token, which is prefixed with "hf" in the __main__ block below.
# NOTE(review): keeping credentials in a tracked file is risky — prefer
# an environment variable.
with open('i.txt', 'r') as file:
    data = file.read()

if __name__ == "__main__":
    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    # file.read() preserves any trailing newline from i.txt; strip it so
    # the assembled token is not corrupted by whitespace (previously the
    # raw contents were concatenated as-is).
    huggingfacehub_api_token = "hf" + data.strip()

    # NOTE(review): only the leading kwargs (temperature, max_new_tokens,
    # top_p, repetition_penalty, beam/stopping settings) are standard
    # text-generation parameters. The trailing entries ("task",
    # "data_source", "feature_engineering", ...) do not look like
    # recognized Hugging Face inference options — confirm whether the
    # endpoint ignores them and they can be deleted. They are kept
    # byte-identical here to avoid changing the request payload.
    llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
                         repo_id=repo_id,
                         model_kwargs={
                             "temperature": 0.5,
                             "max_new_tokens": 512,
                             "top_p": 0.3,
                             "repetition_penalty": 1.2,
                             "num_beams": 3,
                             "length_penalty": 1.5,
                             "no_repeat_ngram_size": 2,
                             "early_stopping": True,
                             "num_return_sequences": 1,
                             "use_cache": True,
                             "task": "predictions",
                             "data_source": "financial_markets",
                             "historical_data_fetch": True,
                             "real-time_data_integration": True,
                             "feature_engineering": ["technical_indicators", "sentiment_analysis", "volume_analysis"],
                             "machine_learning_models": ["LSTM", "Random Forest", "ARIMA", "Gradient Boosting"],
                             "prediction_horizon": "short-term",
                             "evaluation_metrics": ["accuracy", "MSE", "MAE", "RMSE"],
                             "model_fine-tuning": True,
                             "interpretability_explanation": True,
                             "ensemble_methods": ["voting", "stacking"],
                             "hyperparameter_optimization": True,
                             "cross-validation": True,
                             "online_learning": True,
                         }
                         )
    # Bind on all interfaces on port 7860 (the conventional Spaces port).
    app.run(host="0.0.0.0", port=7860)