from bs4 import BeautifulSoup
import csv
import requests
import pandas as pd
import os
import streamlit as st
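# requests.get() is used below without a timeout; a small wrapper like the
# following could guard against hangs (a sketch only, not called by the
# functions below; the name and the 10-second timeout are assumptions).
def _fetch_html(url, timeout=10):
    # Return the page body on HTTP 200, otherwise None so callers can bail out.
    response = requests.get(url, timeout=timeout)
    if response.status_code == 200:
        return response.text
    print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
    return None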
def getdata():
    #------------------------GETTING HTML TABLE DATA---------------------------
    url = 'https://ltp.investingdaddy.com/detailed-options-chain.php'
    response = requests.get(url)
    if response.status_code == 200:
        html_source = response.text
    else:
        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
        return
    #------------------------FILTERING TABLE DATA-------------------------------
    soup = BeautifulSoup(html_source, 'html.parser')
    tables = soup.find_all('table')
    if len(tables) >= 2:
        second_table = tables[1]
    else:
        print("There are not enough tables in the HTML source.")
        return
    #-----------------------CONVERTING HTML TABLE DATA TO CSV--------------------
    html_content = "<html>" + str(second_table) + "</html>"
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find('table', {'id': 'tech-companies-1'})
    table_data = []
    for row in table.find_all('tr'):
        table_data.append([col.get_text(strip=True) for col in row.find_all(['th', 'td'])])
    csv_file = 'sample.csv'
    # Skip the first (header) row so the CSV holds data rows only.
    with open(csv_file, 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(table_data[1:])
    print(f'Table data has been successfully written to {csv_file}.')
def get_his_data(m, d, h, minute):
    #------------------------GETTING HTML TABLE DATA---------------------------
    url = (f'https://ltp.investingdaddy.com/historical-option-chain.php'
           f'?symbol=NIFTY&expiry=2023-09-07&filterdate1=2023-09-04'
           f'&filtertime=09%3A15&filterdate=2023-{m}-{d}T{h}%3A{minute}')
    response = requests.get(url)
    if response.status_code == 200:
        html_source = response.text
    else:
        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
        return
    #------------------------FILTERING TABLE DATA-------------------------------
    soup = BeautifulSoup(html_source, 'html.parser')
    tables = soup.find_all('table')
    if len(tables) >= 2:
        second_table = tables[1]
    else:
        print("There are not enough tables in the HTML source.")
        return
    #-----------------------CONVERTING HTML TABLE DATA TO CSV--------------------
    html_content = "<html>" + str(second_table) + "</html>"
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find('table', {'id': 'tech-companies-1'})
    table_data = []
    for row in table.find_all('tr'):
        table_data.append([col.get_text(strip=True) for col in row.find_all(['th', 'td'])])
    csv_file = 'sample.csv'
    # Skip the first (header) row so the CSV holds data rows only.
    with open(csv_file, 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(table_data[1:])
    print(f'Historical table data has been successfully written to {csv_file}.')
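# getdata() and get_his_data() duplicate their table-to-CSV logic; a shared
# helper could factor it out (a sketch only, neither function calls it yet;
# the name is an assumption):
def _html_table_to_csv(html_source, csv_file):
    # Pull the second table out of the page and write its body rows to CSV,
    # skipping the header row, exactly as the two scrapers above do.
    soup = BeautifulSoup(html_source, 'html.parser')
    tables = soup.find_all('table')
    if len(tables) < 2:
        print("There are not enough tables in the HTML source.")
        return
    rows = [[col.get_text(strip=True) for col in row.find_all(['th', 'td'])]
            for row in tables[1].find_all('tr')]
    with open(csv_file, 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(rows[1:])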
def changedatashape():
    # Load the scraped CSV into a Pandas DataFrame.
    df = pd.read_csv('sample.csv')
    # Flatten the 2-D table into a single row (e.g. 20x20 becomes 1x400).
    data = df.to_numpy()
    horizontal_data = data.flatten().reshape(1, -1)
    # Save the reshaped data to a new CSV file without row or column labels,
    # then drop the intermediate file.
    horizontal_df = pd.DataFrame(horizontal_data)
    horizontal_df.to_csv('sample2.csv', index=False, header=False)
    os.remove('sample.csv')
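# Illustration of the reshape in changedatashape() (a standalone sketch, not
# called anywhere): a 2x2 frame flattens to a single 1x4 row.
def _demo_flatten():
    demo = pd.DataFrame([[1, 2], [3, 4]])
    flat = demo.to_numpy().flatten().reshape(1, -1)
    assert flat.shape == (1, 4)  # flat is [[1, 2, 3, 4]]
    return flat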
def generate_result():
    # Display the reshaped sample in the Streamlit app.
    data = pd.read_csv("sample2.csv")
    st.write(data)
def predict(m, historical=True):
    if historical:
        # Console prompts; only used when running outside the Streamlit button.
        date = input('date:')
        hour = input('hour:')
        minute = input('min:')
        get_his_data(m, date, hour, minute)
    else:
        getdata()
    changedatashape()
    generate_result()
# Label the reshaped CSV: prepend a sample-name column and a feature header.
# Guarded so the app does not crash before sample2.csv has been generated.
csv_file_path = 'sample2.csv'
if os.path.exists(csv_file_path):
    # Read the existing data to determine the number of columns and rows.
    with open(csv_file_path, 'r') as csv_file:
        data = list(csv.reader(csv_file))
    num_columns = len(data[0]) if data else 0
    num_rows = len(data)
    # Prepend a row label (sample0, sample1, ...) to every row.
    for i in range(num_rows):
        data[i].insert(0, f'sample{i}')
    # Write the data back with a header row: 'sample names', feature1..featureN.
    with open(csv_file_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['sample names'] + [f'feature{i}' for i in range(1, num_columns + 1)])
        writer.writerows(data)
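# Illustration of the labelling step above (a standalone sketch, not called
# anywhere): two rows of two features gain a 'sample names' column and a
# feature header.
def _demo_label_columns():
    data = [['10', '20'], ['30', '40']]
    for i, row in enumerate(data):
        row.insert(0, f'sample{i}')
    header = ['sample names', 'feature1', 'feature2']
    # Result: [['sample names', 'feature1', 'feature2'],
    #          ['sample0', '10', '20'],
    #          ['sample1', '30', '40']]
    return [header] + data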
if st.button("Predict result"):
    predict(9, historical=False)

# Disabled until an external model writes predictions.csv.
if False:
    predi = pd.read_csv('predictions.csv')
    if 0.5 < predi.iloc[0, 1] < 0.63:
        st.write("increase")
    else:
        st.write("decrease")
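# To run the app locally (standard Streamlit usage; the filename app.py is
# an assumption):
#   streamlit run app.py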