Dooratre committed
Commit ec9af71 · verified
1 Parent(s): b554377

Update app.py

Files changed (1)
  1. app.py +61 -159
app.py CHANGED
@@ -1,172 +1,74 @@
- from flask import Flask, render_template, request, send_from_directory
- from datetime import datetime
- from langchain_community.llms import HuggingFaceHub
- from langchain.prompts import PromptTemplate
  import requests
- import json
- import nltk
- from textblob import TextBlob
- from nltk.tokenize import word_tokenize
- from nltk.stem import PorterStemmer
- from nltk.stem import WordNetLemmatizer
- import spacy
- from bs4 import BeautifulSoup
-
-
- # Download NLTK resources
- nltk.download('punkt')
- nltk.download('wordnet')
-
- # Download Spacy model
- def download_spacy_model():
-     import spacy
-     try:
-         spacy.load("en_core_web_sm")
-     except OSError:
-         import spacy.cli
-         spacy.cli.download("en_core_web_sm")
-
- download_spacy_model()
-
- nlp = spacy.load("en_core_web_sm")

  app = Flask(__name__)

- # Load the JSON data from the file
- with open('ai_chatbot_data.json', 'r') as file:
-     json_data = json.load(file)
-
- # Updated prompt template for Bitcoin trading
- template = "🌟 **Analysis Report for BTC Trading & Prediction** 🌟\n\n📩 **User Message:**\n{message}\n\n💰 **BTC Price Now** 💹\nCurrent Price: ${bitcoin_price}\nTime: {date_time}\n\n📊 **BTC Historical Data** 📈\nCheck the historical data to analyze price trends.\nTrading Data from 01/01/2024 to {date_time}:\n{result}\n\n🗣️ **Current Conversation Overview** 💬\nRefer to user interactions for context.\nUser Interaction:\n{history}\n\n🤔 **User Sentiment Analysis** 📝\nAnalyze user sentiment for market insights.\nUser Sentiment: {sentiment}\n\n💻 **System Data** 📊\nUtilize system insights for decision-making.\nSystem Insights:\n{json_data}\n\n💡 **Response Recommendations** 💬\nBased on the data provided, suggest trading strategies or predictions.\nAI System Data: {json_data}\n\nResponse:"
- prompt = PromptTemplate(template=template, input_variables=["message", "sentiment", "history", "date_time", "bitcoin_price", "result", "json_data"])
- conversation_history = []
-
- MAX_HISTORY_LENGTH = 55
-
-
- def update_conversation_history(message):
-     if len(conversation_history) >= MAX_HISTORY_LENGTH:
-         conversation_history.pop(0)
-     conversation_history.append(message)
-
- # Function to retrieve Bitcoin price
- def get_bitcoin_price():
-     current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-     url = 'https://api.coindesk.com/v1/bpi/currentprice.json'
-     response = requests.get(url)
-
-     if response.status_code == 200:
-         data = response.json()
-         bitcoin_price = data['bpi']['USD']['rate']
-         return bitcoin_price, current_time
-     else:
-         return 'Error fetching data', current_time
-
-
-

- def get_div_content(url):
-     response = requests.get(url)
-     soup = BeautifulSoup(response.content, 'html.parser')
-
-     div_content = soup.find('div', {'id': '45'})
-     if div_content:
-         return div_content
-     else:
-         return "No div with id=45 found on the page."

- url = "https://dooratre-info.hf.space/?logs=container&__sign=eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sIm9uQmVoYWxmT2YiOnsia2luZCI6InVzZXIiLCJfaWQiOiI2NWIyYzMyNjJiZTk2NjBmMGIxMjg0MDAiLCJ1c2VyIjoiRG9vcmF0cmUifSwiaWF0IjoxNzEyNjgwNTY4LCJzdWIiOiIvc3BhY2VzL0Rvb3JhdHJlL2luZm8iLCJleHAiOjE3MTI3NjY5NjgsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.R_PX6Hw5SMheYTQWPGe1Qla9q8gVBU0mAFF_u8Iad06jSpZ9sPzZqquSowWn7PGVLRYBW21DnvqSwXIoNZ4CAA"

- result = get_div_content(url)
- print(result)
-
- @app.route('/assets/<path:path>')
- def send_static(path):
-     return send_from_directory('assets', path)

  @app.route('/')
  def index():
-     global conversation_history
-     return render_template('index.html', conversation=conversation_history)
-
- @app.route('/submit', methods=['POST'])
- def submit():
-     user_input = request.json.get('user_input')
-
-     doc = nlp(user_input)
-     tokens = [token.text for token in doc]
-
-     sentiment = TextBlob(user_input).sentiment
-
-     ps = PorterStemmer()
-     stemmed_tokens = [ps.stem(token) for token in tokens]
-
-     lemmatizer = WordNetLemmatizer()
-     lemmatized_tokens = [lemmatizer.lemmatize(token) for token in tokens]
-
-     bitcoin_price, current_time = get_bitcoin_price()
-
-     conversation_history.append("User: " + user_input)
-
-     history_tokens = word_tokenize("\n".join(conversation_history))
-     history_stemmed_tokens = [ps.stem(token) for token in history_tokens]
-     history_lemmatized_tokens = [lemmatizer.lemmatize(token) for token in history_tokens]
-
-     model_input = prompt.format(message=user_input, sentiment=sentiment, history="\n".join(conversation_history),
-                                 result=result, date_time=current_time, bitcoin_price=bitcoin_price, json_data=json_data)

-     response = llm(model_input, context="<br>".join(conversation_history))
-
-     bot_response = response.split('Response:')[1].strip()
-     bot_response = bot_response.strip().replace('\n', '<br>')
-
-     update_conversation_history("Bot: " + bot_response)
-
-     conversation_html = '<br>'.join(conversation_history)
-
-     return bot_response
-
- @app.route('/clear_history')
- def clear_history():
-     global conversation_history
-     conversation_history = []
-     return 'Conversation history cleared'

- with open('i.txt', 'r') as file:
-     data = file.read()

- if __name__ == "__main__":
-     repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-     huggingfacehub_api_token = "hf" + data
-
-     llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
-                          repo_id=repo_id,
-                          model_kwargs={
-                              "temperature": 0.5,
-                              "max_new_tokens": 256,
-                              "top_p": 0.5,
-                              "repetition_penalty": 1.2,
-                              "num_beams": 3,
-                              "length_penalty": 1.2,
-                              "no_repeat_ngram_size": 2,
-                              "early_stopping": True,
-                              "num_return_sequences": 1,
-                              "use_cache": True,
-                              "task": "predictions",
-                              "data_source": "financial_markets",
-                              "historical_data_fetch": True,
-                              "real-time_data_integration": True,
-                              "feature_engineering": ["technical_indicators", "sentiment_analysis", "volume_analysis"],
-                              "machine_learning_models": ["LSTM", "Random Forest", "ARIMA", "Gradient Boosting"],
-                              "prediction_horizon": "short-term",
-                              "evaluation_metrics": ["accuracy", "MSE", "MAE", "RMSE"],
-                              "model_fine-tuning": True,
-                              "interpretability_explanation": True,
-                              "ensemble_methods": ["voting", "stacking"],
-                              "hyperparameter_optimization": True,
-                              "cross-validation": True,
-                              "online_learning": True,
-                          }
-                          )
-
-     app.run(host="0.0.0.0", port=7860)
  import requests
+ from flask import Flask, render_template, request
+ import speech_recognition as sr

  app = Flask(__name__)
+ with open('i.txt', 'r') as file:
+     data = file.read()
+ API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
+ headers = {"Authorization": f"Bearer hf{data}"}

+ recognizer = sr.Recognizer()

+ def query(payload):
+     response = requests.post(API_URL, headers=headers, json=payload)
+     return response.json()

+ conversation_history = []

+ def generate_response(user_input):
+     new_query = {
+         "inputs": f"you are ai for help in anything you are created by Mr,Omar Nuwara he is made you \n\n make sure to help people in anything \n\ntask:complete the reesponse:\n\nconversation history:{conversation_history}\n\nuser message:{user_input}\n\nmake sure to response about it and don't generate alot of words just based on the user message \n\n\n\nresponse:",
+         "parameters": {
+             "top_k": 50,
+             "top_p": 0.9,
+             "temperature": 0.1,
+             "repetition_penalty": 1.2,
+             "max_new_tokens": 512,
+             "max_time": 0,
+             "return_full_text": True,
+             "num_return_sequences": 1,
+             "do_sample": False
+         },
+         "options": {
+             "use_cache": False,
+             "wait_for_model": False
+         }
+     }
+
+     output = query(new_query)
+
+     generated_text = output[0]['generated_text']
+
+     response_start = generated_text.find('response:') + len('response:')
+     response_end = generated_text.find('(end response)')
+
+     response_text = generated_text[response_start:response_end].strip()
+
+     note_index = response_text.find("Note:")
+     if note_index != -1:
+         response_text = response_text[:note_index].strip()
+
+     instruction_index = response_text.find("### Instruction:")
+     if instruction_index != -1:
+         response_text = response_text[:instruction_index].strip()
+
+     return response_text

  @app.route('/')
  def index():
+     return render_template('cont.html')

+ @app.route('/chat', methods=['POST'])
+ def chat():
+     user_input = request.form['user_input']
+
+     # Generate AI response based on user input
+     response_text = generate_response(user_input)
+     conversation_history.append({"User": user_input, "AI": response_text})
+
+     return response_text

+ if __name__ == '__main__':
+     app.run(debug=True)
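
For reference, the new /chat route reads a user_input field from form data and returns the generated text as the response body. Below is a minimal, hypothetical client-side smoke test (not part of this commit); it assumes the app is served on Flask's default development port 5000, since app.run(debug=True) no longer pins the port to 7860.

import requests

# Hypothetical smoke test for the updated /chat route (not part of the commit).
# Assumes the Flask dev server is reachable at the default port 5000.
BASE_URL = "http://127.0.0.1:5000"

def ask(message):
    # /chat expects form-encoded data with a 'user_input' field and
    # returns the model's reply as plain text.
    resp = requests.post(f"{BASE_URL}/chat", data={"user_input": message})
    resp.raise_for_status()
    return resp.text

if __name__ == "__main__":
    print(ask("Hello, what can you help me with?"))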