Update app.py
app.py CHANGED
@@ -153,18 +153,53 @@ def ask_ollama(user_message, model='llama-3.3-70b-versatile', system_prompt=sear
         "1": "Error parsing response. Please try again.",
         "2": "Error parsing response. Please try again."
     }
-
+# Using DuckDuckGo API
+# def search_web(topic, max_references=5, data_type="pdf"):
+#     """Search the web using DuckDuckGo and return results."""
+#     doc_list = []
+#     with DDGS(verify=False) as ddgs:
+#         i = 0
+#         for r in ddgs.text(topic, region='wt-wt', safesearch='On', timelimit='n'):
+#             if i >= max_references:
+#                 break
+#             doc_list.append({"type": data_type, "title": r['title'], "body": r['body'], "url": r['href']})
+#             i += 1
+#     return doc_list
+# Using Brave Search API
 def search_web(topic, max_references=5, data_type="pdf"):
-    """Search the web using DuckDuckGo and return results."""
-    doc_list = []
-    with DDGS(verify=False) as ddgs:
-        i = 0
-        for r in ddgs.text(topic, region='wt-wt', safesearch='On', timelimit='n'):
-            if i >= max_references:
-                break
-            doc_list.append({"type": data_type, "title": r['title'], "body": r['body'], "url": r['href']})
-            i += 1
-    return doc_list
+    """Search the web using the Brave API and return results."""
+    url = "https://game4all-serpent.hf.space/search_brave"
+    payload = {
+        "queries": [topic],
+        "n_results": max_references
+    }
+
+    try:
+        response = requests.post(url, json=payload)
+        response.raise_for_status()
+        data = response.json()
+
+        if data.get("error"):
+            raise ValueError(f"API Error: {data['error']}")
+
+        results = data.get("results", [])
+        doc_list = [
+            {
+                "type": data_type,
+                "title": r.get("title", ""),
+                "body": r.get("body", ""),
+                "url": r.get("href", "")
+            }
+            for r in results[:max_references]
+        ]
+        return doc_list
+
+    except requests.RequestException as e:
+        print(f"HTTP Request failed: {e}")
+        return []
+    except ValueError as ve:
+        print(ve)
+        return []
 
 def analyze_pdf_novelty(patent_background, url, data_type="pdf"):
     """Extract full document text from PDF or background from patent and evaluate novelty"""
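
For reference, here is a minimal usage sketch (not part of the commit) showing how the updated search_web could feed analyze_pdf_novelty, whose signature appears in the context lines above. The topic string and patent_background text are made-up placeholders, and the sketch assumes requests and the rest of app.py are already imported/loaded.

# Illustrative only: exercise the Brave-backed search_web and pass each hit to
# analyze_pdf_novelty. Topic and background text below are placeholder values.
patent_background = "Background section of the patent under review..."  # placeholder
references = search_web("solid-state battery electrolyte", max_references=3, data_type="pdf")
for ref in references:
    # Each entry carries the keys built in the diff above: type, title, body, url.
    print(ref["title"], "->", ref["url"])
    verdict = analyze_pdf_novelty(patent_background, ref["url"], data_type=ref["type"])
    print(verdict)

Because the new search_web returns an empty list when the request to the Brave endpoint fails, the loop simply does nothing in that case.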