# app.py — AI_prihars Streamlit option-chain app (author: Harshil24, revision 576d502)
from bs4 import BeautifulSoup
import csv
import requests
import numpy as np
import pandas as pd
import os
import datetime
from tensorflow import keras
import subprocess
import streamlit as st
import jpype
def getdata():
    """Fetch the live detailed options-chain page and write the second HTML
    table (id 'tech-companies-1'), minus its header row, to 'sample.csv'.

    Returns None. On any failure (HTTP error, missing table) a message is
    printed and no file is written — previously the code printed the error
    but fell through and raised NameError on the undefined variables.
    """
    # ------------------------GETTING HTML TABLE DATA---------------------------
    url = 'https://ltp.investingdaddy.com/detailed-options-chain.php'
    response = requests.get(url)
    if response.status_code != 200:
        # Bail out early: without the page there is nothing to parse.
        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
        return
    html_source = response.text
    # ------------------------FILTERING TABLE DATA-------------------------------
    soup = BeautifulSoup(html_source, 'html.parser')
    tables = soup.find_all('table')
    if len(tables) < 2:
        # The option-chain data lives in the page's second table.
        print("There are not enough tables in the HTML source.")
        return
    second_table = tables[1]
    # -----------------------CONVERTING HTML TABLE DATA TO CSV--------------------
    html_content = "<html>" + str(second_table) + "</html>"
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find('table', {'id': 'tech-companies-1'})
    if table is None:
        # Guard against a page layout change; .find_all on None would crash.
        print("Expected table with id 'tech-companies-1' not found.")
        return
    table_data = [
        [col.get_text(strip=True) for col in row.find_all(['th', 'td'])]
        for row in table.find_all('tr')
    ]
    csv_file = 'sample.csv'
    with open(csv_file, 'w', newline='') as csvfile:
        # Skip the first scraped row (the table header) via slicing instead
        # of the original one-shot boolean flag.
        csv.writer(csvfile).writerows(table_data[1:])
    print(f'Table data has been successfully written to {csv_file}.')
def get_his_data(m, d, h, min):
    """Fetch a historical NIFTY option-chain snapshot and write the second
    HTML table (id 'tech-companies-1'), minus its header row, to 'sample.csv'.

    m, d, h, min: month/day/hour/minute substituted into the site's
        filterdate query parameter — presumably zero-padded strings;
        TODO confirm the expected padding against the site.

    Returns None. On any failure (HTTP error, missing table) a message is
    printed and no file is written — previously the code printed the error
    but fell through and raised NameError on the undefined variables.
    """
    # ------------------------GETTING HTML TABLE DATA---------------------------
    url = (
        f'https://ltp.investingdaddy.com/historical-option-chain.php'
        f'?symbol=NIFTY&expiry=2023-09-07&filterdate1=2023-09-04'
        f'&filtertime=09%3A15&filterdate=2023-{m}-{d}T{h}%3A{min}'
    )
    response = requests.get(url)
    if response.status_code != 200:
        # Bail out early: without the page there is nothing to parse.
        print(f"Failed to retrieve the webpage. Status code: {response.status_code}")
        return
    html_source = response.text
    # ------------------------FILTERING TABLE DATA-------------------------------
    soup = BeautifulSoup(html_source, 'html.parser')
    tables = soup.find_all('table')
    if len(tables) < 2:
        # The option-chain data lives in the page's second table.
        print("There are not enough tables in the HTML source.")
        return
    second_table = tables[1]
    # -----------------------CONVERTING HTML TABLE DATA TO CSV--------------------
    html_content = "<html>" + str(second_table) + "</html>"
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find('table', {'id': 'tech-companies-1'})
    if table is None:
        # Guard against a page layout change; .find_all on None would crash.
        print("Expected table with id 'tech-companies-1' not found.")
        return
    table_data = [
        [col.get_text(strip=True) for col in row.find_all(['th', 'td'])]
        for row in table.find_all('tr')
    ]
    csv_file = 'sample.csv'
    with open(csv_file, 'w', newline='') as csvfile:
        # Skip the first scraped row (the table header) via slicing instead
        # of the original one-shot boolean flag.
        csv.writer(csvfile).writerows(table_data[1:])
    print(f'historical Table data has been successfully written to {csv_file}.')
def changedatashape():
    """Flatten the scraped option-chain grid into a single wide row.

    Reads 'sample.csv' (its first line is consumed as the pandas header),
    flattens every remaining cell row-major into a 1xN row, writes that row
    to 'sample2.csv' without labels, and deletes 'sample.csv'.
    """
    # Load the scraped table; note read_csv treats line 1 as the header.
    frame = pd.read_csv('sample.csv')
    # Row-major flatten of the grid into one horizontal record.
    flat_row = frame.to_numpy().flatten().reshape(1, -1)
    # Persist without index/header so downstream code sees raw values only.
    pd.DataFrame(flat_row).to_csv('sample2.csv', index=False, header=False)
    # The per-scrape intermediate file is no longer needed.
    os.remove('sample.csv')
#jpype.startJVM(jpype.getDefaultJVMPath())
def generate_result():
    """Load the flattened row from 'sample2.csv' and render it in the Streamlit UI."""
    frame = pd.read_csv("sample2.csv")
    st.write(frame)
def predict(m, historical=True):
    """Refresh the option-chain snapshot and display it.

    m: month number passed to the historical fetch.
    historical: when True, prompt on stdin for day/hour/minute and fetch
        that historical snapshot; otherwise scrape the live chain.
    """
    if not historical:
        getdata()
    else:
        day = input('date:')
        hour = input('hour:')
        minute = input('min')
        get_his_data(m, day, hour, minute)
    changedatashape()
    generate_result()
# ---- Label the flattened row file so it reads like a tiny dataset ----
# Specify the path to your CSV file
csv_file_path = 'sample2.csv'
# Guard: on the app's first run no prediction has happened yet, so the file
# does not exist — previously this crashed the whole script at import with
# FileNotFoundError.
if os.path.exists(csv_file_path):
    # Read the existing data to determine the number of columns and rows
    with open(csv_file_path, 'r') as csv_file:
        data = list(csv.reader(csv_file))
    num_columns = len(data[0]) if data else 0
    num_rows = len(data)
    # Prepend a 'sampleN' label (0-based, as before) to every data row.
    new_column_header = 'sample names'
    new_column_values = ['sample{}'.format(i) for i in range(num_rows)]
    for i in range(num_rows):
        data[i].insert(0, new_column_values[i])
    # Write the modified data back with a header row. Fixed the off-by-one
    # that labelled the first data column 'feature2': columns are now
    # feature1..featureN, matching the number of data columns.
    with open(csv_file_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([new_column_header] + ['feature{}'.format(i) for i in range(1, num_columns + 1)])
        writer.writerows(data)
# Trigger a live scrape + display when the user presses the button.
if st.button("Predict result"):
    predict(9, historical=False)
# Deliberately disabled branch (kept equivalent to the original `1==2`):
# would read predictions.csv and print an up/down call from a threshold.
if False:
    predi = pd.read_csv('predictions.csv')
    if 0.5 < predi.iloc[0, 1] < 0.63:
        st.write("increase")
    else:
        st.write("decrease")