DrishtiSharma committed on
Commit
39a3df1
·
verified ·
1 Parent(s): 26a15b4

Create tab1_works.py

Browse files
Files changed (1) hide show
  1. mylab/tab1_works.py +190 -0
mylab/tab1_works.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import pandas as pd
import plotly.express as px
from pandasai import Agent
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.schema import Document
import os
import re

# Page title.
st.title("Data Analyzer")

# API keys are read from the environment; both are required by the
# PandasAI agent and the LangChain RAG pipeline built further down.
api_key = os.getenv("OPENAI_API_KEY")
pandasai_api_key = os.getenv("PANDASAI_API_KEY")

if not api_key or not pandasai_api_key:
    st.warning("API keys for OpenAI or PandasAI are missing. Ensure both keys are set in environment variables.")

# Session reset: clear every cached key, then rerun the script from the top.
if st.button("Reset Session"):
    for key in list(st.session_state.keys()):
        del st.session_state[key]
    # Fix: st.experimental_rerun() was deprecated and later removed from
    # Streamlit; prefer st.rerun() and keep the old call only as a
    # fallback for older Streamlit versions.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()
29
def validate_and_clean_dataset(df):
    """Normalize column labels and flag (but do not repair) missing values.

    Each column label is stripped, lower-cased, and has spaces replaced
    with underscores; the rename is applied in place on *df*. If any cell
    is null, a Streamlit warning is shown so the user can decide how to
    clean the data. The (same) dataframe is returned for chaining.
    """
    normalized = []
    for label in df.columns:
        normalized.append(label.strip().lower().replace(" ", "_"))
    df.columns = normalized

    # Surface missing data; cleaning is deliberately left to the user.
    if df.isnull().values.any():
        st.warning("Dataset contains missing values. Consider cleaning the data.")
    return df
38
def load_dataset_into_session():
    """Render the dataset-source picker and load the chosen dataset.

    Whatever gets loaded is normalized through validate_and_clean_dataset
    and stored in st.session_state.df so it survives Streamlit reruns.
    """
    source_choice = st.radio(
        "Select Dataset Input:",
        ["Use Repo Directory Dataset", "Use Hugging Face Dataset", "Upload CSV File"],
    )

    if source_choice == "Use Repo Directory Dataset":
        # Option 1: a CSV file checked into the repository.
        repo_csv = "./source/test.csv"
        if st.button("Load Dataset"):
            try:
                st.session_state.df = pd.read_csv(repo_csv)
                st.session_state.df = validate_and_clean_dataset(st.session_state.df)
                st.success(f"File loaded successfully from '{repo_csv}'!")
            except Exception as e:
                st.error(f"Error loading dataset from the repo directory: {e}")

    elif source_choice == "Use Hugging Face Dataset":
        # Option 2: pull a dataset from the Hugging Face hub.
        dataset_name = st.text_input(
            "Enter Hugging Face Dataset Name:", value="HUPD/hupd"
        )
        if st.button("Load Hugging Face Dataset"):
            try:
                from datasets import load_dataset

                hf_dataset = load_dataset(dataset_name, split="train", trust_remote_code=True)
                if hasattr(hf_dataset, "to_pandas"):
                    st.session_state.df = hf_dataset.to_pandas()
                else:
                    st.session_state.df = pd.DataFrame(hf_dataset)
                st.session_state.df = validate_and_clean_dataset(st.session_state.df)
                st.success(f"Hugging Face Dataset '{dataset_name}' loaded successfully!")
            except Exception as e:
                st.error(f"Error loading Hugging Face dataset: {e}")

    elif source_choice == "Upload CSV File":
        # Option 3: a CSV uploaded through the browser.
        csv_upload = st.file_uploader("Upload a CSV File:", type=["csv"])
        if csv_upload:
            try:
                st.session_state.df = pd.read_csv(csv_upload)
                st.session_state.df = validate_and_clean_dataset(st.session_state.df)
                st.success("File uploaded successfully!")
            except Exception as e:
                st.error(f"Error reading uploaded file: {e}")
85
load_dataset_into_session()

# Run the analysis UI only once a dataset and both API keys are available.
if "df" in st.session_state and api_key and pandasai_api_key:
    # Propagate the keys so the PandasAI / LangChain clients pick them up.
    os.environ["OPENAI_API_KEY"] = api_key
    os.environ["PANDASAI_API_KEY"] = pandasai_api_key

    df = st.session_state.df
    st.write("Dataset Preview:")
    st.write(df.head())  # Ensure the dataset preview is displayed only once

    # Fix: pre-initialize so a failed setup step below cannot leave these
    # names undefined — the tabs previously crashed with NameError when
    # Agent() or the RAG setup raised inside its try block.
    agent = None
    documents = []
    qa_chain = None

    # Set up PandasAI Agent
    try:
        agent = Agent(df)
        st.info("PandasAI Agent initialized successfully.")
    except Exception as e:
        st.error(f"Error initializing PandasAI Agent: {str(e)}")

    # Convert each dataframe row into one text document for retrieval.
    try:
        documents = [
            Document(
                page_content=", ".join([f"{col}: {row[col]}" for col in df.columns]),
                metadata={"index": index},
            )
            for index, row in df.iterrows()
        ]
        st.info("Documents created successfully for RAG.")
    except Exception as e:
        st.error(f"Error creating documents for RAG: {str(e)}")

    # Set up RAG: embeddings -> FAISS index -> retrieval QA chain.
    try:
        embeddings = OpenAIEmbeddings()
        vectorstore = FAISS.from_documents(documents, embeddings)
        retriever = vectorstore.as_retriever()
        qa_chain = RetrievalQA.from_chain_type(
            llm=ChatOpenAI(),
            chain_type="stuff",
            retriever=retriever,
        )
        st.info("RAG setup completed successfully.")
    except Exception as e:
        st.error(f"Error setting up RAG: {str(e)}")

    # Create tabs
    tab1, tab2, tab3 = st.tabs(["PandasAI Analysis", "RAG Q&A", "Data Visualization"])

    with tab1:
        st.subheader("Data Analysis with PandasAI")
        pandas_question = st.text_input("Ask a question about the dataset (PandasAI):")
        if pandas_question:
            if agent is None:
                # Fix: fail gracefully instead of NameError when init failed.
                st.error("PandasAI Agent is not available; fix the initialization error above.")
            else:
                try:
                    result = agent.chat(pandas_question)
                    st.write("PandasAI Answer:", result)
                    if hasattr(agent, "last_output"):
                        st.write("PandasAI Intermediate Output:", agent.last_output)
                except Exception as e:
                    st.error(f"PandasAI encountered an error: {str(e)}")
                    # Fallback: answer "decision for patent <number>" queries
                    # with direct pandas filtering.
                    if "patent_number" in pandas_question.lower() and "decision" in pandas_question.lower():
                        try:
                            match = re.search(r'\d{7,}', pandas_question)
                            if match:
                                patent_number = match.group()
                                decision = df.loc[df['patent_number'] == int(patent_number), 'decision']
                                # Fix: guard against an empty result before .iloc[0],
                                # which previously raised IndexError.
                                if decision.empty:
                                    st.write(f"No record found for patent {patent_number}.")
                                else:
                                    st.write(f"Fallback Answer: The decision for patent {patent_number} is '{decision.iloc[0]}'.")
                            else:
                                st.write("Could not extract patent number from the query.")
                        except Exception as fallback_error:
                            st.error(f"Fallback processing failed: {fallback_error}")

    with tab2:
        st.subheader("Q&A with RAG")
        rag_question = st.text_input("Ask a question about the dataset (RAG):")
        if rag_question:
            if qa_chain is None:
                # Fix: fail gracefully instead of NameError when RAG setup failed.
                st.error("RAG chain is not available; fix the setup error above.")
            else:
                try:
                    result = qa_chain.run(rag_question)
                    st.write("RAG Answer:", result)
                except Exception as e:
                    st.error(f"RAG encountered an error: {str(e)}")

    with tab3:
        st.subheader("Data Visualization")
        viz_question = st.text_input("What kind of graph would you like? (e.g., 'Show a scatter plot of salary vs experience')")
        if viz_question:
            if agent is None:
                # Fix: fail gracefully instead of NameError when init failed.
                st.error("PandasAI Agent is not available; fix the initialization error above.")
            else:
                try:
                    result = agent.chat(viz_question)
                    # Pull a fenced ```python code block out of the answer.
                    code_pattern = r'```python\n(.*?)\n```'
                    code_match = re.search(code_pattern, result, re.DOTALL)

                    if code_match:
                        viz_code = code_match.group(1)
                        # SECURITY: exec() runs LLM-generated code with full
                        # process privileges. Sandbox or review this path
                        # before any production deployment.
                        exec(viz_code)
                    else:
                        st.write("Unable to generate the graph. Showing fallback example.")
                        fig = px.scatter(df, x=df.columns[0], y=df.columns[1])
                        st.plotly_chart(fig)
                except Exception as e:
                    st.error(f"An error occurred during visualization: {str(e)}")
else:
    if not api_key:
        st.warning("Please set the OpenAI API key in environment variables.")
    if not pandasai_api_key:
        st.warning("Please set the PandasAI API key in environment variables.")