Shyamnath commited on
Commit
f698b3b
·
verified ·
1 Parent(s): 6e74bb6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -87
app.py CHANGED
@@ -1,87 +1,87 @@
1
- import os
2
- import pandas as pd
3
- import gradio as gr
4
- from langchain_google_genai import ChatGoogleGenerativeAI
5
- from langchain_experimental.agents import create_pandas_dataframe_agent
6
- from langchain_core.prompts import PromptTemplate
7
- # Set up API key for Google Gemini
8
- os.environ["GOOGLE_API_KEY"] = "AIzaSyDSorjiEVV2KCWelkDLFxQsju3KDQOF344" # Replace with actual API key
9
-
10
- # Initialize the LLM
11
- llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
12
-
13
- # Placeholder for agent and dataframe
14
- agent = None
15
- df = None
16
-
17
- # Define the function to handle CSV uploads and set up the LangChain agent
18
- def handle_file_upload(file):
19
- global agent, df
20
- # Check if file has .csv extension
21
- if not file.name.endswith(".csv"):
22
- return "Error: Please upload a valid CSV file.", None
23
-
24
- # Load the uploaded file into a DataFrame
25
- try:
26
- df = pd.read_csv(file) # Read directly from the file object
27
- # Create a new LangChain agent with the uploaded DataFrame
28
- agent = create_pandas_dataframe_agent(llm, df, verbose=True, allow_dangerous_code=True)
29
- return "CSV uploaded successfully. You can now ask questions about the data.", df
30
- except Exception as e:
31
- return f"Error reading CSV file: {e}", None
32
-
33
- # Define the function to process the user query
34
- def answer_query(query):
35
- if agent is None:
36
- return "Please upload a CSV file first."
37
-
38
- # Invoke the agent with the query
39
-
40
- formatted_query = PromptTemplate.from_template(
41
- '''
42
- Please act as a data analyst and respond to my queries with insights from the provided dataset.
43
- If your response involves numeric data or comparisons, format the answer in a clear tabular form whenever
44
- it enhances readability and clarity.
45
- Provide analyses that highlight trends, patterns, and notable details in the data, and use tabular format
46
- for presenting summaries, comparisons, or grouped data and whenever user asks listing or something similar
47
- to help illustrate your findings effectively. Additionally, interpret any findings with context and data-driven
48
- reasoning as a skilled data analyst would. Also make sure not to give any data that is not asked by the user or
49
- not relevant to the given context
50
- Keep the above said details in mind and answer the below query:
51
- Query:
52
- {query}
53
- '''
54
- )
55
- response = agent.invoke(query)
56
-
57
- # Check if the response contains tabular data
58
- if isinstance(response, pd.DataFrame):
59
- return response # Display as table if it's a DataFrame
60
- else:
61
- # Format response as Markdown
62
- return f"**Response:**\n\n{response['output']}"
63
-
64
- # Create the Gradio interface
65
- with gr.Blocks() as iface:
66
- gr.Markdown("# ZEN-Analyser")
67
- gr.Markdown("Upload a CSV file to view the data and ask questions about it.")
68
-
69
- # File upload component
70
- file_input = gr.File(label="Upload CSV", file_types=[".csv"])
71
- # Dataframe display for the uploaded CSV
72
- data_output = gr.DataFrame(label="Uploaded Data")
73
- # Textbox for entering queries
74
- query_input = gr.Textbox(label="Enter your query")
75
- # Markdown component for displaying the agent's response with Markdown support
76
- response_output = gr.Markdown(label="Response")
77
-
78
- # Button to trigger query processing
79
- query_button = gr.Button("Submit Query")
80
-
81
- # Define event actions
82
- file_input.upload(handle_file_upload, file_input, [response_output, data_output])
83
- query_button.click(answer_query, query_input, response_output)
84
-
85
- # Launch the Gradio app
86
- iface.launch()
87
-
 
1
import os
import pandas as pd
import gradio as gr
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_experimental.agents import create_pandas_dataframe_agent
from langchain_core.prompts import PromptTemplate

# SECURITY: a previous revision committed a literal Google API key here.
# That key must be treated as leaked — revoke/rotate it. The key is now
# read from the environment instead of being hardcoded in source control.
if not os.environ.get("GOOGLE_API_KEY"):
    raise RuntimeError(
        "GOOGLE_API_KEY environment variable is not set. "
        "Export your Google Gemini API key before launching the app."
    )

# Initialize the LLM used by the pandas DataFrame agent.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")

# Module-level state shared between the upload and query handlers.
# Both are (re)populated by handle_file_upload() on a successful CSV upload.
agent = None
df = None
16
+
17
+ # Define the function to handle CSV uploads and set up the LangChain agent
18
# Define the function to handle CSV uploads and set up the LangChain agent
def handle_file_upload(file):
    """Load an uploaded CSV into a DataFrame and (re)create the agent.

    Parameters
    ----------
    file : gradio file object (exposes a ``.name`` path attribute), or
        ``None`` when the upload event fires with no file.

    Returns
    -------
    tuple
        ``(status_message, dataframe_or_None)`` — the message is shown in
        the UI; the DataFrame feeds the preview table (``None`` on error).
    """
    global agent, df

    # Guard against a cleared/empty upload event (previously crashed on
    # attribute access).
    if file is None:
        return "Error: Please upload a valid CSV file.", None

    # Accept the extension case-insensitively: ".CSV"/".Csv" were
    # previously rejected by the case-sensitive endswith check.
    if not file.name.lower().endswith(".csv"):
        return "Error: Please upload a valid CSV file.", None

    # Load the uploaded file into a DataFrame
    try:
        df = pd.read_csv(file)  # Read directly from the file object
        # Rebuild the agent around the freshly uploaded DataFrame.
        # NOTE(review): allow_dangerous_code=True lets the agent execute
        # LLM-generated Python; acceptable only for a trusted demo app.
        agent = create_pandas_dataframe_agent(llm, df, verbose=True, allow_dangerous_code=True)
        return "CSV uploaded successfully. You can now ask questions about the data.", df
    except Exception as e:
        # Surface the parse/agent-construction error to the UI instead of
        # crashing the Gradio callback.
        return f"Error reading CSV file: {e}", None
32
+
33
+ # Define the function to process the user query
34
# Define the function to process the user query
def answer_query(query):
    """Run *query* through the DataFrame agent and return a displayable answer.

    Returns a pandas DataFrame when the agent yields one (rendered as a
    table), otherwise a Markdown-formatted string. Asks the user to upload
    a CSV first if no agent has been created yet.
    """
    if agent is None:
        return "Please upload a CSV file first."

    # Wrap the raw user query in the data-analyst instruction template.
    prompt_template = PromptTemplate.from_template(
        '''
        Please act as a data analyst and respond to my queries with insights from the provided dataset.
        If your response involves numeric data or comparisons, format the answer in a clear tabular form whenever
        it enhances readability and clarity.
        Provide analyses that highlight trends, patterns, and notable details in the data, and use tabular format
        for presenting summaries, comparisons, or grouped data and whenever user asks listing or something similar
        to help illustrate your findings effectively. Additionally, interpret any findings with context and data-driven
        reasoning as a skilled data analyst would. Also make sure not to give any data that is not asked by the user or
        not relevant to the given context
        Keep the above said details in mind and answer the below query:
        Query:
        {query}
        '''
    )
    # BUG FIX: the template was previously built but never used — the agent
    # received only the raw query. Render the template and invoke the agent
    # with the fully formatted prompt instead.
    formatted_query = prompt_template.format(query=query)
    response = agent.invoke(formatted_query)

    # Agents normally return a dict carrying an "output" key; if a DataFrame
    # ever comes back, return it directly so Gradio renders it as a table.
    if isinstance(response, pd.DataFrame):
        return response
    # Format response as Markdown
    return f"**Response:**\n\n{response['output']}"
63
+
64
+ # Create the Gradio interface
65
# ---- Gradio UI -------------------------------------------------------------
with gr.Blocks() as iface:
    gr.Markdown("# ZEN-Analyser")
    gr.Markdown("Upload a CSV file to view the data and ask questions about it.")

    # Widgets, in layout order: CSV upload, table preview, query box,
    # Markdown answer area, and the submit button.
    file_input = gr.File(label="Upload CSV", file_types=[".csv"])
    data_output = gr.DataFrame(label="Uploaded Data")
    query_input = gr.Textbox(label="Enter your query")
    response_output = gr.Markdown(label="Response")
    query_button = gr.Button("Submit Query")

    # Event wiring: an upload refreshes the status message and the preview
    # table; clicking the button sends the query to the agent.
    file_input.upload(handle_file_upload, file_input, [response_output, data_output])
    query_button.click(answer_query, query_input, response_output)

# Start the web app.
iface.launch()
87
+