Spaces:
Running
Running
Commit
·
38c75b1
1
Parent(s):
f89d0fc
modifying prompt to improve phi3; making llama3 default
Browse files- app/system_prompt.txt +2 -2
- app/variables.py +1 -1
app/system_prompt.txt
CHANGED
@@ -9,7 +9,8 @@ For example:
|
|
9 |
Ensure the response contains only this JSON object, with no additional text, formatting, or commentary.
|
10 |
|
11 |
# Important Details
|
12 |
-
|
|
|
13 |
- For map-related queries (e.g., "show me"), ALWAYS include "id", "geom", "name", and "acres" in the results, PLUS any other columns referenced in the query (e.g., in conditions, calculations, or subqueries). All columns used in the query MUST be returned in the results. This output structure is MANDATORY for all map-related queries.
|
14 |
- If the user specifies "protected" land or areas, only return records where "status" is either "30x30-conserved" or "other-conserved".
|
15 |
- ONLY use LIMIT in your SQL queries if the user specifies a quantity (e.g., 'show me 5'). Otherwise, return all matching data without a limit.
|
@@ -26,7 +27,6 @@ Ensure the response contains only this JSON object, with no additional text, for
|
|
26 |
- Users may not be familiar with this data, so your explanation should be short, clear, and easily understandable. You MUST state which column(s) you used to answer their query, along with definition(s) of the column(s). Do NOT explain SQL commands.
|
27 |
- If the prompt is unrelated to the California dataset, provide examples of relevant queries that you can answer.
|
28 |
- If the user's query is unclear, DO NOT make assumptions. Instead, ask for clarification and provide examples of similar queries you can handle, using the columns or data available. You MUST ONLY deliver accurate results.
|
29 |
-
- Not every query will require SQL code; users may ask for more information about values and columns in the table, which you can answer based on the information in this prompt. For these cases, your "sql_query" field should be empty.
|
30 |
|
31 |
# Column Descriptions
|
32 |
- "established": The time range which the land was acquired, either "2024" or "pre-2024".
|
|
|
9 |
Ensure the response contains only this JSON object, with no additional text, formatting, or commentary.
|
10 |
|
11 |
# Important Details
|
12 |
+
|
13 |
+
- Not every query will require SQL code; users may ask for more information about values and columns in the table, which you can answer based on the information in this prompt. For these cases, your "sql_query" field should be empty.
|
14 |
- For map-related queries (e.g., "show me"), ALWAYS include "id", "geom", "name", and "acres" in the results, PLUS any other columns referenced in the query (e.g., in conditions, calculations, or subqueries). All columns used in the query MUST be returned in the results. This output structure is MANDATORY for all map-related queries.
|
15 |
- If the user specifies "protected" land or areas, only return records where "status" is either "30x30-conserved" or "other-conserved".
|
16 |
- ONLY use LIMIT in your SQL queries if the user specifies a quantity (e.g., 'show me 5'). Otherwise, return all matching data without a limit.
|
|
|
27 |
- Users may not be familiar with this data, so your explanation should be short, clear, and easily understandable. You MUST state which column(s) you used to answer their query, along with definition(s) of the column(s). Do NOT explain SQL commands.
|
28 |
- If the prompt is unrelated to the California dataset, provide examples of relevant queries that you can answer.
|
29 |
- If the user's query is unclear, DO NOT make assumptions. Instead, ask for clarification and provide examples of similar queries you can handle, using the columns or data available. You MUST ONLY deliver accurate results.
|
|
|
30 |
|
31 |
# Column Descriptions
|
32 |
- "established": The time range which the land was acquired, either "2024" or "pre-2024".
|
app/variables.py
CHANGED
@@ -294,8 +294,8 @@ from langchain_openai import ChatOpenAI
|
|
294 |
import streamlit as st
|
295 |
|
296 |
llm_options = {
|
297 |
-
"llama-3.3": ChatOpenAI(model = "groq-tools", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
298 |
"llama3": ChatOpenAI(model = "llama3-sdsc", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
|
|
299 |
"phi3": ChatOpenAI(model = "phi3", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
300 |
"DeepSeek-R1-Distill-Qwen-32B": ChatOpenAI(model = "DeepSeek-R1-Distill-Qwen-32B", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
301 |
"watt": ChatOpenAI(model = "watt", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
|
|
294 |
import streamlit as st
|
295 |
|
296 |
llm_options = {
|
|
|
297 |
"llama3": ChatOpenAI(model = "llama3-sdsc", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
298 |
+
"llama-3.3": ChatOpenAI(model = "groq-tools", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
299 |
"phi3": ChatOpenAI(model = "phi3", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
300 |
"DeepSeek-R1-Distill-Qwen-32B": ChatOpenAI(model = "DeepSeek-R1-Distill-Qwen-32B", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|
301 |
"watt": ChatOpenAI(model = "watt", api_key=st.secrets['NRP_API_KEY'], base_url = "https://llm.nrp-nautilus.io/", temperature=0),
|