Dooratre committed
Commit db25548 · verified
Parent: 2cf06ef

Delete python.txt

Files changed (1):
  1. python.txt +0 -173
python.txt DELETED
@@ -1,173 +0,0 @@
-from flask import Flask, render_template, request, send_from_directory
-from datetime import datetime
-from langchain_community.llms import HuggingFaceHub
-from langchain.prompts import PromptTemplate
-import requests
-import json
-import nltk
-from textblob import TextBlob
-from nltk.tokenize import word_tokenize
-from nltk.stem import PorterStemmer
-from nltk.stem import WordNetLemmatizer
-import tensorflow as tf
-from tensorflow import keras
-import spacy
-from bs4 import BeautifulSoup
-
-nltk.download('punkt')
-nltk.download('wordnet')
-
-def download_spacy_model():
-    import spacy  # Import spacy within the function scope
-    try:
-        spacy.load("en_core_web_sm")
-    except OSError:
-        import spacy.cli
-        spacy.cli.download("en_core_web_sm")
-
-download_spacy_model()
-
-nlp = spacy.load("en_core_web_sm")
-
-app = Flask(__name__)
-with open('python.txt', 'r') as file:
-    Python = file.read()
-
-# Load the JSON data from the file
-with open('ai_chatbot_data.json', 'r') as file:
-    json_data = json.load(file)
-
-
-
-
-template = "Message: {message}\n\nSentiment Analysis: {sentiment}\n\nConversation Now Between you and user: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBitcoin history from 1-jan-2024 to today the tidy is date-open-high-low-close-adj close-volum: {database_tag}\n\nYour system: {json_data}.\n\nCreated by this code:{Python}\n\nResponse:"
-prompt = PromptTemplate(template=template, input_variables=["message", "sentiment", "history", "date_time", "bitcoin_price", "database_tag", "Python", "json_data"])
-conversation_history = []
-
-MAX_HISTORY_LENGTH = 55
-
-url = "https://dooratre-info.hf.space/"
-
-response = requests.get(url)
-soup = BeautifulSoup(response.content, 'html.parser')
-
-div_content = soup.find('div', {'id': '45'})
-if div_content:
-    print(div_content)
-else:
-    print("No div with id=45 found on the page.")
-database_tag = div_content
-
-def update_conversation_history(message):
-    if len(conversation_history) >= MAX_HISTORY_LENGTH:
-        conversation_history.pop(0)
-    conversation_history.append(message)
-
-
-def get_bitcoin_price():
-    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    url = 'https://api.coindesk.com/v1/bpi/currentprice.json'
-    response = requests.get(url)
-
-    if response.status_code == 200:
-        data = response.json()
-        bitcoin_price = data['bpi']['USD']['rate']
-        return bitcoin_price, current_time
-    else:
-        return 'Error fetching data', current_time
-
-@app.route('/assets/<path:path>')
-def send_static(path):
-    return send_from_directory('assets', path)
-
-@app.route('/')
-def index():
-    global conversation_history
-    return render_template('index.html', conversation=conversation_history)
-
-@app.route('/submit', methods=['POST'])
-def submit():
-    user_input = request.json.get('user_input')
-
-    doc = nlp(user_input)
-    tokens = [token.text for token in doc]
-
-    sentiment = TextBlob(user_input).sentiment
-
-    # Add Spacy NLP processing here
-
-    ps = PorterStemmer()
-    stemmed_tokens = [ps.stem(token) for token in tokens]
-
-    lemmatizer = WordNetLemmatizer()
-    lemmatized_tokens = [lemmatizer.lemmatize(token) for token in tokens]
-
-    sentiment = TextBlob(user_input).sentiment
-
-    bitcoin_price, current_time = get_bitcoin_price()
-
-    conversation_history.append("User: " + user_input)
-
-    # NLTK processing for conversation history
-    history_tokens = word_tokenize("<br>".join(conversation_history))
-    history_stemmed_tokens = [ps.stem(token) for token in history_tokens]
-    history_lemmatized_tokens = [lemmatizer.lemmatize(token) for token in history_tokens]
-
-    model_input = prompt.format(message=user_input, sentiment=sentiment, history="<br>".join(conversation_history), database_tag=div_content, date_time=current_time, bitcoin_price=bitcoin_price, json_data=json_data, history_tokens=history_tokens, history_stemmed_tokens=history_stemmed_tokens, history_lemmatized_tokens=history_lemmatized_tokens, Python=Python)
-
-    response = llm(model_input, context="<br>".join(conversation_history))
-
-    bot_response = response.split('Response:')[1].strip()
-    bot_response = bot_response.strip().replace('\n', '<br>')
-
-    # Update the conversation history with bot's response
-    update_conversation_history("You " + bot_response)
-
-    conversation_html = '<br>'.join(conversation_history)
-
-    return bot_response
-
-@app.route('/clear_history')
-def clear_history():
-    global conversation_history
-    conversation_history = []
-    return 'Conversation history cleared'
-
-with open('i.txt', 'r') as file:
-    data = file.read()
-
-if __name__ == "__main__":
-    repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-    huggingfacehub_api_token = "hf" + data
-
-    llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
-                         repo_id=repo_id,
-                         model_kwargs={
-                             "temperature": 0.1,
-                             "max_new_tokens": 1024,
-                             "top_p": 0.5,
-                             "repetition_penalty": 1.2,
-                             "num_beams": 3,
-                             "length_penalty": 1.2,
-                             "no_repeat_ngram_size": 2,
-                             "early_stopping": True,
-                             "num_return_sequences": 1,
-                             "use_cache": True,
-                             "task": "predictions",
-                             "data_source": "financial_markets",
-                             "historical_data_fetch": True,
-                             "real-time_data_integration": True,
-                             "feature_engineering": ["technical_indicators", "sentiment_analysis", "volume_analysis"],
-                             "machine_learning_models": ["LSTM", "Random Forest", "ARIMA", "Gradient Boosting"],
-                             "prediction_horizon": "short-term",
-                             "evaluation_metrics": ["accuracy", "MSE", "MAE", "RMSE"],
-                             "model_fine-tuning": True,
-                             "interpretability_explanation": True,
-                             "ensemble_methods": ["voting", "stacking"],
-                             "hyperparameter_optimization": True,
-                             "cross-validation": True,
-                             "online_learning": True,
-                         }
-                         )
-
-    app.run(host="0.0.0.0", port=7860)