Update app.py
app.py CHANGED
@@ -45,7 +45,7 @@ development_texts = [
 # --- Chroma Client Settings (in-memory) ---
 client_settings = Settings(
     chroma_api_impl="local",
-    persist_directory=None #
+    persist_directory=None # Use None for ephemeral in-memory DB; or specify a folder path to persist data.
 )
 
 # --- Preprocessing & Embeddings ---
@@ -55,7 +55,7 @@ development_docs = splitter.create_documents(development_texts)
 
 embeddings = OpenAIEmbeddings(
     model="text-embedding-3-large",
-    openai_api_key=os.environ.get("OPENAI_API_KEY") #
+    openai_api_key=os.environ.get("OPENAI_API_KEY") # Set this in your HF Secrets.
 )
 
 # Create vector stores using local in-memory Chroma
@@ -89,8 +89,8 @@ development_tool = create_retriever_tool(
 tools = [research_tool, development_tool]
 
 # --- Agent and Workflow Functions ---
+# Note: Using only AIMessage and HumanMessage for message types.
 class AgentState(TypedDict):
-    # Using only AIMessage | HumanMessage
     messages: Annotated[Sequence[AIMessage | HumanMessage], add_messages]
 
 def agent(state: AgentState):
@@ -127,8 +127,6 @@ Otherwise, just answer directly.
     if response.status_code == 200:
         response_text = response.json()['choices'][0]['message']['content']
         logger.info(f"DeepSeek response: {response_text}")
-
-        # Format the response to call the right tool
         if "SEARCH_RESEARCH:" in response_text:
             query = response_text.split("SEARCH_RESEARCH:")[1].strip()
             results = research_retriever.invoke(query)
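
Note: the vector-store construction that consumes the changed client_settings and embeddings is outside these hunks (only the "Create vector stores using local in-memory Chroma" comment is visible). A minimal sketch of how that wiring typically looks with the older LangChain/Chroma APIs implied by chroma_api_impl="local" is below; the collection name, tool name, description, and import paths are illustrative assumptions, not taken from this commit.

# Sketch only: assumes langchain's Chroma wrapper and create_retriever_tool;
# identifiers marked "placeholder" are not from this diff.
from chromadb.config import Settings
from langchain.vectorstores import Chroma
from langchain.tools.retriever import create_retriever_tool

# client_settings, embeddings, and research_docs come from the code shown
# (or referenced) in the hunks above.
research_db = Chroma.from_documents(
    documents=research_docs,
    embedding=embeddings,
    collection_name="research",       # placeholder collection name
    client_settings=client_settings,  # in-memory: persist_directory=None
)
research_retriever = research_db.as_retriever()

research_tool = create_retriever_tool(
    research_retriever,
    "research_search",                # placeholder tool name
    "Search research documents.",     # placeholder description
)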