Harshil24 committed on
Commit
694eae1
1 Parent(s): f54a21b

Update app.py

Files changed (1)
  1. app.py +170 -1
app.py CHANGED
@@ -1,3 +1,172 @@
+from bs4 import BeautifulSoup
+import csv
+import requests
+import numpy as np
+import pandas as pd
+import os
+import datetime
+from tensorflow import keras
+import streamlit as st  # st.button() is called at the bottom of this script
 import subprocess
 
-subprocess.run("ls")
+def getdata():
+    # ------------------------GETTING HTML TABLE DATA---------------------------
+    url = 'https://ltp.investingdaddy.com/detailed-options-chain.php'
+    response = requests.get(url)
+    if response.status_code == 200:
+        html_source = response.text
+        #print(html_source)
+    else:
+        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
+        return
+
+    # ------------------------FILTERING TABLE DATA-------------------------------
+    soup = BeautifulSoup(html_source, 'html.parser')
+    tables = soup.find_all('table')
+    ##price = soup.find('label', {'id':'future_val'})
+    ##price = price.text
+    ##prices.append(price)
+    ##return
+
+    if len(tables) >= 2:
+        second_table = tables[1]
+    else:
+        print("There are not enough tables in the HTML source.")
+        return
+
+    # -----------------------CONVERTING HTML TABLE DATA TO CSV--------------------
+    html_content = "<html>" + str(second_table) + "</html>"
+    soup = BeautifulSoup(html_content, 'html.parser')
+    table = soup.find('table', {'id': 'tech-companies-1'})
+    table_data = []
+    rows = table.find_all('tr')
+    for row in rows:
+        row_data = []
+        cols = row.find_all(['th', 'td'])
+        for col in cols:
+            row_data.append(col.get_text(strip=True))
+        table_data.append(row_data)
+
+    # Write every row except the first (header) row to sample.csv
+    csv_file = 'sample.csv'
+    r = False
+    with open(csv_file, 'w', newline='') as csvfile:
+        csv_writer = csv.writer(csvfile)
+        for row_data in table_data:
+            if r:
+                csv_writer.writerow(row_data)
+            else:
+                r = True
+
+    print(f'Table data has been successfully written to {csv_file}.')
+
+
+def get_his_data(m, d, h, minute):
+    # ------------------------GETTING HTML TABLE DATA---------------------------
+    url = f'https://ltp.investingdaddy.com/historical-option-chain.php?symbol=NIFTY&expiry=2023-09-07&filterdate1=2023-09-04&filtertime=09%3A15&filterdate=2023-{m}-{d}T{h}%3A{minute}'
+    response = requests.get(url)
+    if response.status_code == 200:
+        html_source = response.text
+        #print(html_source)
+    else:
+        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
+        return
+
+    # ------------------------FILTERING TABLE DATA-------------------------------
+    soup = BeautifulSoup(html_source, 'html.parser')
+    tables = soup.find_all('table')
+    ##price = soup.find('label', {'id':'future_val'})
+    ##price = price.text
+    ##prices.append(price)
+    ##return
+
+    if len(tables) >= 2:
+        second_table = tables[1]
+    else:
+        print("There are not enough tables in the HTML source.")
+        return
+
+    # -----------------------CONVERTING HTML TABLE DATA TO CSV--------------------
+    html_content = "<html>" + str(second_table) + "</html>"
+    soup = BeautifulSoup(html_content, 'html.parser')
+    table = soup.find('table', {'id': 'tech-companies-1'})
+    table_data = []
+    rows = table.find_all('tr')
+    for row in rows:
+        row_data = []
+        cols = row.find_all(['th', 'td'])
+        for col in cols:
+            row_data.append(col.get_text(strip=True))
+        table_data.append(row_data)
+
+    # Write every row except the first (header) row to sample.csv
+    csv_file = 'sample.csv'
+    r = False
+    with open(csv_file, 'w', newline='') as csvfile:
+        csv_writer = csv.writer(csvfile)
+        for row_data in table_data:
+            if r:
+                csv_writer.writerow(row_data)
+            else:
+                r = True
+
+    print(f'Historical table data has been successfully written to {csv_file}.')
+
+
+def changedatashape():
+    # Load the scraped CSV data into a pandas DataFrame
+    df = pd.read_csv('sample.csv')
+
+    # Check the shape of the original DataFrame (it should be 20x20)
+    #print("Original DataFrame Shape:", df.shape)
+
+    # Convert the DataFrame into a NumPy array
+    data = df.to_numpy()
+
+    # Flatten the array into a single row (1 x N)
+    horizontal_data = data.flatten().reshape(1, -1)
+
+    # Check the shape of the reshaped data (it should be 1x400)
+    #print("Horizontal Data Shape:", horizontal_data.shape)
+
+    # Save the reshaped data to a new CSV file without row or column labels,
+    # then remove the intermediate file
+    horizontal_df = pd.DataFrame(horizontal_data)
+    horizontal_df.to_csv('sample2.csv', index=False, header=False)
+    os.remove('sample.csv')
+
+
+def predict(m=None, historical=True):
+    if historical:
+        date = input('date:')
+        hour = input('hour:')
+        minute = input('min:')
+        get_his_data(m, date, hour, minute)
+    else:
+        getdata()
+    changedatashape()
+
+    # Specify the path to the flattened CSV file
+    csv_file_path = 'sample2.csv'
+
+    # Read the existing data to determine the number of columns and rows
+    with open(csv_file_path, 'r') as csv_file:
+        reader = csv.reader(csv_file)
+        data = list(reader)
+        num_columns = len(data[0]) if data else 0
+        num_rows = len(data)
+
+    # Generate the header for the new label column
+    new_column_header = 'sample names'
+
+    # Generate the values for the new column
+    new_column_values = ['sample{}'.format(i - 1) for i in range(1, num_rows + 1)]
+
+    # Insert the new column at the front of every row
+    for i in range(num_rows):
+        data[i].insert(0, new_column_values[i])
+
+    # Write the modified data back to the file, prefixed with a header row
+    with open(csv_file_path, 'w', newline='') as csv_file:
+        writer = csv.writer(csv_file)
+        writer.writerow([new_column_header] + ['feature{}'.format(i + 1) for i in range(1, num_columns + 1)])
+        writer.writerows(data)
+
+
+if st.button("Generate Text"):
+    predict(historical=False)
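
For reference, the new app.py is a Streamlit script: it is launched with streamlit run app.py, and clicking the "Generate Text" button calls predict(historical=False), which scrapes the live option chain into sample.csv, flattens it into a single row in sample2.csv, and prepends a label column. A minimal alternative sketch of the scraping step, not part of this commit: pandas.read_html can pull the same tables in one call, assuming lxml or html5lib is installed and the data still sits in the page's second table.

import pandas as pd

# Fetch the same page getdata() scrapes; pandas parses every <table> it finds.
url = 'https://ltp.investingdaddy.com/detailed-options-chain.php'
tables = pd.read_html(url)

# As in getdata(), the option-chain data is assumed to be the second table.
# Writing with header=False roughly matches getdata() skipping the header row.
if len(tables) >= 2:
    tables[1].to_csv('sample.csv', index=False, header=False)
else:
    print("There are not enough tables in the HTML source.")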