GianJSX zej97 commited on
Commit
cc93c47
0 Parent(s):

Duplicate from zej97/AI-Research-Assistant

Browse files

Co-authored-by: ZE JIN <[email protected]>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +35 -0
  2. .gitignore +12 -0
  3. LICENSE +21 -0
  4. README.md +78 -0
  5. __pycache__/aira.cpython-311.pyc +0 -0
  6. __pycache__/aira.cpython-39.pyc +0 -0
  7. __pycache__/app.cpython-311.pyc +0 -0
  8. __pycache__/app.cpython-39.pyc +0 -0
  9. __pycache__/components.cpython-311.pyc +0 -0
  10. __pycache__/home.cpython-311.pyc +0 -0
  11. __pycache__/main.cpython-311.pyc +0 -0
  12. __pycache__/main.cpython-39.pyc +0 -0
  13. __pycache__/style.cpython-311.pyc +0 -0
  14. __pycache__/test.cpython-311.pyc +0 -0
  15. __pycache__/test2.cpython-311.pyc +0 -0
  16. __pycache__/test3.cpython-311.pyc +0 -0
  17. actions/__pycache__/duck_search.cpython-311.pyc +0 -0
  18. actions/__pycache__/google_search.cpython-311.pyc +0 -0
  19. actions/__pycache__/web_scrape.cpython-311.pyc +0 -0
  20. actions/__pycache__/web_scrape.cpython-39.pyc +0 -0
  21. actions/__pycache__/web_search.cpython-311.pyc +0 -0
  22. actions/__pycache__/web_search.cpython-39.pyc +0 -0
  23. actions/duck_search.py +11 -0
  24. actions/google_search.py +63 -0
  25. agent/__init__.py +0 -0
  26. agent/__pycache__/__init__.cpython-311.pyc +0 -0
  27. agent/__pycache__/llm_utils.cpython-311.pyc +0 -0
  28. agent/__pycache__/llm_utils.cpython-39.pyc +0 -0
  29. agent/__pycache__/prompts.cpython-311.pyc +0 -0
  30. agent/__pycache__/prompts.cpython-39.pyc +0 -0
  31. agent/__pycache__/research_agent.cpython-311.pyc +0 -0
  32. agent/__pycache__/research_agent.cpython-39.pyc +0 -0
  33. agent/__pycache__/run.cpython-311.pyc +0 -0
  34. agent/__pycache__/run.cpython-39.pyc +0 -0
  35. agent/__pycache__/toolkits.cpython-311.pyc +0 -0
  36. agent/llm_utils.py +39 -0
  37. agent/prompts.py +137 -0
  38. agent/research_agent.py +109 -0
  39. agent/toolkits.py +15 -0
  40. app.py +103 -0
  41. config/__init__.py +9 -0
  42. config/__pycache__/__init__.cpython-311.pyc +0 -0
  43. config/__pycache__/__init__.cpython-39.pyc +0 -0
  44. config/__pycache__/config.cpython-311.pyc +0 -0
  45. config/__pycache__/config.cpython-39.pyc +0 -0
  46. config/__pycache__/singleton.cpython-311.pyc +0 -0
  47. config/__pycache__/singleton.cpython-39.pyc +0 -0
  48. config/config.py +82 -0
  49. config/singleton.py +24 -0
  50. outputs/Is Apple stock stable/research--1807787156852143573.txt +1 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #Ignore env containing secrets
2
+ .env
3
+ #Ignore Virtual Env
4
+ env/
5
+ #Ignore generated outputs
6
+ outputs/
7
+ #Ignore pycache
8
+ **/__pycache__/
9
+
10
+ test*.py
11
+ test/
12
+ flagged/
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Ze Jin
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: AI-Research-Assistant
3
+ app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 3.38.0
6
+ duplicated_from: zej97/AI-Research-Assistant
7
+ ---
8
+ <div style="width: 100%;">
9
+ <img src="./statics/title.svg" style="width: 100%;">
10
+ <div align="right">
11
+ <a href="./README.md">English</a> |
12
+ <a href="./statics/README_zh.md">中文</a>
13
+ </div>
14
+ </div>
15
+
16
+ Inspired by [gpt-researcher](https://github.com/assafelovic/gpt-researcher). This project endeavors to develop an AI research assistant capable of **generating research reports** effortlessly for researchers. For instance, researchers can request the AI research assistant to compose a report on *the latest advancements in the field of superconductors as of 2023*, which is currently a trending topic. The AI research assistant will subsequently compile a report based on the relevant information obtained from the internet. Now, AIRA also offers support for **academic English polishing**.
17
+
18
+ <!-- make a table -->
19
+ | Example1-1 | Example1-2 | Example1-3 |
20
+ | :----------------------------------: | :----------------------------------: | :----------------------------------: |
21
+ | <img src="./statics/example1-1.png"> | <img src="./statics/example1-2.png"> | <img src="./statics/example1-3.png"> |
22
+
23
+ The currently supported agents encompass a wide range of fields, including *finance, business analysis, clinical medicine, basic medicine, travel, academic research and sociology*.
24
+
25
+ In addition to official api, this project offers an alternative approach to generating research reports by utilizing a third-party API. For access to this third-party API, please refer to [chimeragpt](https://chimeragpt.adventblocks.cc/) or [GPT-API-free](https://github.com/chatanywhere/GPT_API_free). Before running the project, kindly ensure that you set the environment variables `OPENAI_API_KEY` and `OPENAI_API_BASE`.
26
+
27
+ ```shell
28
+ $ export OPENAI_API_KEY = your_api_key
29
+ $ export OPENAI_API_BASE = your_api_base
30
+ ```
31
+
32
+ or you can set the api key and base in `.env` file.
33
+
34
+
35
+ ## Installation
36
+
37
+ 1. Clone the repository
38
+
39
+ ```shell
40
+ $ git clone [email protected]:paradoxtown/ai_research_assistant.git
41
+ $ cd ai_research_assistant
42
+ ```
43
+
44
+ 2. Install the dependencies
45
+
46
+ ```shell
47
+ $ pip install -r requirements.txt
48
+ ```
49
+
50
+ 3. Export environment variables
51
+
52
+ ```shell
53
+ $ export OPENAI_API_KEY = your_api_key
54
+ $ export OPENAI_API_BASE = your_api_base
55
+ ```
56
+ or modify the `.env` file.
57
+
58
+ 4. Run the project
59
+
60
+ ```shell
61
+ $ python app.py
62
+ ```
63
+
64
+ ## TODO
65
+
66
+ - [x] Switch Google Search to DuckDuckGo
67
+ - [ ] Literature review
68
+ - [x] Third-party API
69
+ - [ ] Prettify report
70
+ - [x] Add medical agent and social agent
71
+ - [ ] Add option for users to customize the number of words and temperature
72
+ - [ ] Copy and download buttons
73
+ - [ ] Allows the user to choose the degree of research.
74
+ - [ ] Wikipedia Understanding
75
+
76
+ ---
77
+
78
+ <div align="center">Happy researching! 🚀</div>
__pycache__/aira.cpython-311.pyc ADDED
Binary file (4.71 kB). View file
 
__pycache__/aira.cpython-39.pyc ADDED
Binary file (2.39 kB). View file
 
__pycache__/app.cpython-311.pyc ADDED
Binary file (7.18 kB). View file
 
__pycache__/app.cpython-39.pyc ADDED
Binary file (3.91 kB). View file
 
__pycache__/components.cpython-311.pyc ADDED
Binary file (164 Bytes). View file
 
__pycache__/home.cpython-311.pyc ADDED
Binary file (2.27 kB). View file
 
__pycache__/main.cpython-311.pyc ADDED
Binary file (3.84 kB). View file
 
__pycache__/main.cpython-39.pyc ADDED
Binary file (1.99 kB). View file
 
__pycache__/style.cpython-311.pyc ADDED
Binary file (1.93 kB). View file
 
__pycache__/test.cpython-311.pyc ADDED
Binary file (1.23 kB). View file
 
__pycache__/test2.cpython-311.pyc ADDED
Binary file (390 Bytes). View file
 
__pycache__/test3.cpython-311.pyc ADDED
Binary file (1.04 kB). View file
 
actions/__pycache__/duck_search.cpython-311.pyc ADDED
Binary file (970 Bytes). View file
 
actions/__pycache__/google_search.cpython-311.pyc ADDED
Binary file (3.87 kB). View file
 
actions/__pycache__/web_scrape.cpython-311.pyc ADDED
Binary file (10.6 kB). View file
 
actions/__pycache__/web_scrape.cpython-39.pyc ADDED
Binary file (6.73 kB). View file
 
actions/__pycache__/web_search.cpython-311.pyc ADDED
Binary file (1.31 kB). View file
 
actions/__pycache__/web_search.cpython-39.pyc ADDED
Binary file (769 Bytes). View file
 
actions/duck_search.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from duckduckgo_search import DDGS
2
+
3
+
4
def duckduckgo_search(query, max_search_result=3):
    """Run a DuckDuckGo text search and collect at most ``max_search_result`` hits.

    Args:
        query (str): The search phrase.
        max_search_result (int): Upper bound on the number of results returned.

    Returns:
        list: Raw result records exactly as yielded by ``DDGS.text``.
    """
    collected = []
    with DDGS() as ddgs:
        hits = ddgs.text(query, region='wt-wt', safesearch='off', timelimit='y')
        for hit in hits:
            if len(collected) == max_search_result:
                break
            collected.append(hit)
    return collected
actions/google_search.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from bs4 import BeautifulSoup
3
+
4
+
5
def get_urls(query, proxies=None):
    """Scrape the Google results page for ``query`` and return result links.

    Args:
        query (str): The search phrase.
        proxies (dict | None): Optional proxy mapping passed to ``requests``.

    Returns:
        list[dict]: Items of the form ``{'title': ..., 'link': ...}``.
    """
    from urllib.parse import quote_plus

    # URL-encode the query so spaces and special characters survive the GET
    # (previously the raw query was interpolated into the URL unescaped).
    url = f"https://www.google.com/search?q={quote_plus(query)}"
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
    response = requests.get(url, headers=headers, proxies=proxies)
    soup = BeautifulSoup(response.content, 'html.parser')
    results = []
    for g in soup.find_all('div', class_='g'):
        anchors = g.find_all('a')
        if not anchors:
            continue
        link = anchors[0]['href']
        if link.startswith('/url?q='):
            link = link[7:]
        if not link.startswith('http'):
            continue
        heading = g.find('h3')
        if heading is None:
            # Some result blocks (ads, snippet cards) carry no <h3>;
            # skip them instead of raising AttributeError.
            continue
        results.append({'title': heading.text, 'link': link})

    return results
25
+
26
def scrape_text(url, proxies=None) -> str:
    """Scrape visible text from a webpage.

    Args:
        url (str): The URL to scrape text from.
        proxies (dict | None): Optional proxy mapping passed to ``requests``.

    Returns:
        str: The scraped text, or an error message when the request fails.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
        'Content-Type': 'text/plain',
    }
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=8)
        if response.encoding == "ISO-8859-1":
            # Requests falls back to latin-1 when the server omits a charset;
            # prefer the content-sniffed encoding in that case.
            response.encoding = response.apparent_encoding
    except requests.exceptions.RequestException:
        # Only network/HTTP errors are expected here; the previous bare
        # `except:` also swallowed KeyboardInterrupt and genuine bugs.
        return "Unable to connect to the server"
    soup = BeautifulSoup(response.text, "html.parser")
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    text = "\n".join(chunk for chunk in chunks if chunk)
    return text
52
+
53
+
54
if __name__ == '__main__':
    # Ad-hoc smoke test: search a question, then dump the scraped text of
    # the first few result pages.
    question = "What is LSTM?"
    proxies = None
    urls = get_urls(question, proxies)
    max_search_result = 10

    for hit in urls[:max_search_result]:
        print(hit)
        print(scrape_text(hit['link'], proxies))
        print("\n\n")
agent/__init__.py ADDED
File without changes
agent/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (150 Bytes). View file
 
agent/__pycache__/llm_utils.cpython-311.pyc ADDED
Binary file (1.66 kB). View file
 
agent/__pycache__/llm_utils.cpython-39.pyc ADDED
Binary file (2.95 kB). View file
 
agent/__pycache__/prompts.cpython-311.pyc ADDED
Binary file (11.3 kB). View file
 
agent/__pycache__/prompts.cpython-39.pyc ADDED
Binary file (9.36 kB). View file
 
agent/__pycache__/research_agent.cpython-311.pyc ADDED
Binary file (6.62 kB). View file
 
agent/__pycache__/research_agent.cpython-39.pyc ADDED
Binary file (7.01 kB). View file
 
agent/__pycache__/run.cpython-311.pyc ADDED
Binary file (729 Bytes). View file
 
agent/__pycache__/run.cpython-39.pyc ADDED
Binary file (2.17 kB). View file
 
agent/__pycache__/toolkits.cpython-311.pyc ADDED
Binary file (853 Bytes). View file
 
agent/llm_utils.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from config import Config
3
+ import openai
4
+
5
+ CFG = Config()
6
+
7
+ openai.api_key = CFG.openai_api_key
8
+ openai.api_base = CFG.openai_api_base
9
+
10
+ from typing import Optional
11
+
12
def llm_response(model,
                 messages,
                 temperature: float = CFG.temperature,
                 max_tokens: Optional[int] = None):
    """Run a single (non-streaming) chat completion and return its text.

    Args:
        model: Chat model identifier.
        messages: OpenAI-style list of message dicts.
        temperature: Sampling temperature; defaults to the configured value.
        max_tokens: Optional cap on completion length (None = API default).

    Returns:
        str: Content of the first choice's message.
    """
    completion = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return completion.choices[0].message["content"]
22
+
23
+
24
def llm_stream_response(model,
                        messages,
                        temperature: float = CFG.temperature,
                        max_tokens: Optional[int] = None):
    """Stream a chat completion, yielding the accumulated text after each delta.

    Args:
        model: Chat model identifier.
        messages: OpenAI-style list of message dicts.
        temperature: Sampling temperature; defaults to the configured value.
        max_tokens: Optional cap on completion length (None = API default).

    Yields:
        str: The response text received so far (grows with every chunk).
    """
    accumulated = ""
    stream = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
        stream=True,
    )
    for chunk in stream:
        delta_text = chunk["choices"][0].get("delta", {}).get("content")
        if delta_text is None:
            # Role-only / terminal chunks carry no content; skip them.
            continue
        accumulated += delta_text
        yield accumulated
agent/prompts.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def generate_agent_role_prompt(agent):
    """Look up the system-role prompt for an agent type.

    Args:
        agent (str): The type of the agent.

    Returns:
        str: The role prompt, or "No such agent" for unknown types.
    """
    role_prompts = {
        "Finance Agent": "You are a seasoned finance analyst AI assistant. Your primary goal is to compose comprehensive, astute, impartial, and methodically arranged financial reports based on provided data and trends.",
        "Travel Agent": "You are a world-travelled AI tour guide assistant. Your main purpose is to draft engaging, insightful, unbiased, and well-structured travel reports on given locations, including history, attractions, and cultural insights.",
        "Academic Research Agent": "You are an AI academic research assistant. Your primary responsibility is to create thorough, academically rigorous, unbiased, and systematically organized reports on a given research topic, following the standards of scholarly work.",
        "Business Analyst Agent": "You are an experienced AI business analyst assistant. Your main objective is to produce comprehensive, insightful, impartial, and systematically structured business reports based on provided business data, market trends, and strategic analysis.",
        "Computer Security Analyst Agent": "You are an AI specializing in computer security analysis. Your principal duty is to generate comprehensive, meticulously detailed, impartial, and systematically structured reports on computer security topics. This includes Exploits, Techniques, Threat Actors, and Advanced Persistent Threat (APT) Groups. All produced reports should adhere to the highest standards of scholarly work and provide in-depth insights into the complexities of computer security.",
        "Clinical Medicine Agent": "You are an AI specializing in clinical medicine analysis. Your primary role is to compose comprehensive, well-researched, impartial, and methodically organized reports on various aspects of clinical medicine. This includes in-depth studies on medical conditions, treatments, medical advancements, patient care, and healthcare practices. Your reports should follow the highest standards of medical research and provide critical insights into the complexities of the clinical medicine field. Whether it's analyzing medical data, conducting literature reviews, or evaluating the efficacy of medical interventions, your goal is to deliver insightful and evidence-based reports to assist medical professionals and researchers in making informed decisions.",
        "Basic Medicine Agent": "You are an AI specializing in basic medicine. Your goal is to provide comprehensive, unbiased reports on essential healthcare topics. Deliver clear insights into general health practices, common medical conditions, preventive measures, first aid procedures, and healthy lifestyle choices. Aim to be accessible to non-medical professionals and offer evidence-based recommendations for overall well-being.",
        "Social Science Research Agent": "You are an AI social science research assistant with a focus on providing comprehensive, well-researched, and unbiased reports on various topics within the social sciences. Your primary goal is to delve into the complexities of human behavior, society, and culture to produce insightful and methodically organized reports. Whether it's sociology, psychology, anthropology, economics, or any other social science discipline, you excel in critically analyzing data, academic literature, and historical trends to offer valuable insights into the subject matter. Your reports are crafted to meet the highest standards of scholarly work, adhering to objectivity and academic rigor while presenting information in a clear and engaging manner. With your expertise, you can delve into societal issues, cultural dynamics, economic trends, and other relevant areas within the realm of social sciences.",
        "Default Agent": "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.",
    }
    return role_prompts.get(agent, "No such agent")
26
+
27
+
28
def generate_report_prompt(question, research_summary):
    """Build the prompt asking for a full research report.

    Args:
        question (str): The question the report must answer.
        research_summary (str): The research material to base the report on.

    Returns:
        str: The assembled report prompt.
    """
    segments = (
        f'"""{research_summary}""" Using the above information, answer the following',
        f' question or topic: "{question}" in a detailed report --',
        " The report should focus on the answer to the question, should be well structured, informative, detailed",
        " in depth, with facts and numbers if available, a minimum of 2,400 words and with markdown syntax and apa format. ",
        "Write all source urls at the end of the report in apa format.",
    )
    return "".join(segments)
40
+
41
+
42
def generate_search_queries_prompt(question):
    """Build the prompt asking for five Google search queries as JSON.

    Args:
        question (str): The research question.

    Returns:
        str: The search-queries prompt.
    """
    json_format = '{"Q1": query1, "Q2": query2, "Q3": query3, "Q4": query4, "Q5": query5}'
    return (f'Write 5 google search queries to search online that form an objective opinion from the following: "{question}"\n'
            f'You must respond with a list of strings in the following json format: {json_format}')
50
+
51
+
52
def generate_resource_report_prompt(question, research_summary):
    """Build the bibliography-recommendation report prompt.

    Args:
        question (str): The question to recommend resources for.
        research_summary (str): The research material to base the report on.

    Returns:
        str: The resource report prompt.
    """
    pieces = (
        f'"""{research_summary}""" Based on the above information, generate a bibliography recommendation report for the following',
        f' question or topic: "{question}". The report should provide a detailed analysis of each recommended resource,',
        ' explaining how each source can contribute to finding answers to the research question.',
        ' Focus on the relevance, reliability, and significance of each source.',
        ' Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax.',
        ' Include relevant facts, figures, and numbers whenever available.',
        ' The report should have a minimum length of 1,200 words.',
    )
    return ''.join(pieces)
69
+
70
+
71
def generate_outline_report_prompt(question, research_summary):
    """Build the prompt asking for a Markdown outline of a research report.

    Args:
        question (str): The question the outline must address.
        research_summary (str): The research material to base the outline on.

    Returns:
        str: The outline report prompt.
    """
    pieces = (
        f'"""{research_summary}""" Using the above information, generate an outline for a research report in Markdown syntax',
        f' for the following question or topic: "{question}". The outline should provide a well-structured framework',
        ' for the research report, including the main sections, subsections, and key points to be covered.',
        ' The research report should be detailed, informative, in-depth, and a minimum of 1,200 words.',
        ' Use appropriate Markdown syntax to format the outline and ensure readability.',
    )
    return ''.join(pieces)
83
+
84
+
85
def generate_concepts_prompt(question, research_summary):
    """Build the prompt asking for the 5 main concepts behind a question.

    Args:
        question (str): The question to generate the concepts prompt for.
        research_summary (str): The research summary to draw concepts from.

    Returns:
        str: The concepts prompt.
    """
    # Fixes two runtime-string bugs: "framework" previously ran straight into
    # "You" with no separator, and the example list was malformed
    # (`"concepts 4, concepts 5"` was a single string, not two items).
    return f'"""{research_summary}""" Using the above information, generate a list of 5 main concepts to learn for a research report'\
           f' on the following question or topic: "{question}". The outline should provide a well-structured framework. '\
           'You must respond with a list of strings in the following format: ["concepts 1", "concepts 2", "concepts 3", "concepts 4", "concepts 5"]'
95
+
96
+
97
def generate_lesson_prompt(concept):
    """Build the prompt asking for a comprehensive Markdown lesson on a concept.

    Args:
        concept (str): The concept to generate the lesson prompt for.

    Returns:
        str: The lesson prompt for the given concept.
    """
    # The original concatenation dropped the spaces between source lines,
    # producing "definitionof" and "differentfields" in the prompt text;
    # trailing spaces restore the intended wording.
    prompt = f'generate a comprehensive lesson about {concept} in Markdown syntax. This should include the definition '\
             f'of {concept}, its historical background and development, its applications or uses in different '\
             f'fields, and notable events or facts related to {concept}.'

    return prompt
111
+
112
+
113
def get_report_by_type(report_type):
    """Map a report-type label to its prompt-builder function.

    Args:
        report_type (str): One of 'Research Report', 'Resource Report'
            or 'Outline Report'.

    Returns:
        Callable: The prompt generator for that report type.

    Raises:
        KeyError: If ``report_type`` is not a known label.
    """
    builders = {
        'Research Report': generate_report_prompt,
        'Resource Report': generate_resource_report_prompt,
        'Outline Report': generate_outline_report_prompt,
    }
    return builders[report_type]
120
+
121
+
122
def generate_english_polishing_prompt(content):
    """Build the academic English polishing prompt (inspired by gpt_academic).

    Args:
        content (str): The paragraph to be polished.

    Returns:
        str: The polishing prompt wrapping ``content``.
    """
    instruction = ('Below is a paragraph from an academic paper. Polish the writing to meet the academic style and '
                   'improve the spelling, grammar, clarity, concision, and overall readability. When necessary, '
                   'rewrite the whole sentence. Furthermore, list all modifications and explain the reasons for '
                   'doing so in the markdown table.')
    return f'{instruction} \n {content}'
129
+
130
+
131
def generate_summarize_prompt(content):
    """Build the crawl-summarization prompt (inspired by gpt_academic).

    Args:
        content (str): Raw crawled text to be cleaned and summarized.

    Returns:
        str: The summarize prompt wrapping ``content``.
    """
    instruction = ('The following information is crawled from the Internet and will be used in writing the research '
                   'report. Please clear the junk information and summarize the useful information in depth. '
                   'Include all factual information, numbers, stats etc if available.')
    return f'{instruction} \n {content}'
agent/research_agent.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from actions.duck_search import duckduckgo_search
3
+ from processing.text import read_txt_files
4
+ from agent.llm_utils import llm_response, llm_stream_response
5
+ from config import Config
6
+ from agent import prompts
7
+ import os
8
+ import string
9
+
10
+ CFG = Config()
11
+
12
+
13
class ResearchAgent:
    """Agent that searches the web for a question and streams a written report."""

    def __init__(self, question, agent):
        """Initialize the research assistant with the given question.

        Args:
            question (str): The question to research.
            agent (str): Agent type label (see prompts.generate_agent_role_prompt).
        """
        self.question = question
        self.agent = agent
        self.visited_urls = set()
        self.search_summary = ""
        # ASCII, punctuation-free slice of the question so it is safe to use
        # as a filesystem path component under ./outputs/.
        self.directory_name = ''.join(c for c in question if c.isascii() and c not in string.punctuation)[:100]
        self.dir_path = os.path.dirname(f"./outputs/{self.directory_name}/")

    def call_agent(self, action):
        """Run one non-streaming completion as this agent and return its text."""
        messages = [{
            "role": "system",
            "content": prompts.generate_agent_role_prompt(self.agent),
        }, {
            "role": "user",
            "content": action,
        }]
        return llm_response(
            model=CFG.fast_llm_model,
            messages=messages,
        )

    def call_agent_stream(self, action):
        """Stream a completion as this agent, yielding the accumulated text."""
        messages = [{
            "role": "system",
            "content": prompts.generate_agent_role_prompt(self.agent),
        }, {
            "role": "user",
            "content": action,
        }]
        yield from llm_stream_response(
            model=CFG.fast_llm_model,
            messages=messages
        )

    def create_search_queries(self):
        """Ask the LLM for search queries answering the question.

        Returns:
            dict: Mapping like ``{"Q1": query1, ...}`` parsed from the reply.

        Raises:
            json.JSONDecodeError: If the model reply is not valid JSON.
        """
        result = self.call_agent(prompts.generate_search_queries_prompt(self.question))
        return json.loads(result)

    def search_single_query(self, query):
        """Run the web search for one query.

        Args:
            query (str): The query to search for.

        Returns:
            list: Up to three DuckDuckGo result records.
        """
        return duckduckgo_search(query, max_search_result=3)

    def run_search_summary(self, query):
        """Search one query and persist the raw results under ./outputs.

        Args:
            query (str): The query to run the search summary for.

        Returns:
            list: The search results for the given query.
        """
        import hashlib

        responses = self.search_single_query(query)

        print(f"Searching for {query}")
        # Stable digest for the cache filename: the builtin hash() is salted
        # per process (PYTHONHASHSEED), so it produced a different file for
        # the same query on every run.
        digest = hashlib.md5(query.encode("utf-8")).hexdigest()
        file_path = f"./outputs/{self.directory_name}/research-{digest}.txt"
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        # Explicit UTF-8: results are arbitrary web text and the platform
        # default encoding may not be able to represent them.
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(responses, f)
        # Previously this logged the hash instead of the query text.
        print(f"Saved {query} to {file_path}")
        return responses

    def search_online(self):
        """Gather (or reload previously saved) search results for the question.

        Returns:
            str: Concatenated query/result summary text.
        """
        # Reuse results cached on disk when the output directory exists.
        self.search_summary = read_txt_files(self.dir_path) if os.path.isdir(self.dir_path) else ""

        if not self.search_summary:
            search_queries = self.create_search_queries()
            for _, query in search_queries.items():
                search_result = self.run_search_summary(query)
                self.search_summary += f"=Query=:\n{query}\n=Search Result=:\n{search_result}\n================\n"

        return self.search_summary

    def write_report(self, report_type):
        """Stream the report for the question.

        Args:
            report_type (str): 'Research Report', 'Resource Report' or
                'Outline Report'.

        Yields:
            str: Accumulated report text as it streams from the LLM.
        """
        report_type_func = prompts.get_report_by_type(report_type)
        yield from self.call_agent_stream(report_type_func(self.question, self.search_online()))
agent/toolkits.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from agent import prompts, llm_utils
2
+ from config import Config
3
+
4
+ CFG = Config()
5
+
6
def english_polishing(content):
    """Stream an academic-English polish of ``content`` via the fast LLM.

    Args:
        content (str): The paragraph to polish.

    Yields:
        str: Accumulated polished text as it streams in.
    """
    request = [{
        "role": "user",
        "content": prompts.generate_english_polishing_prompt(content),
    }]

    yield from llm_utils.llm_stream_response(
        model=CFG.fast_llm_model,
        messages=request)
app.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ from config import check_openai_api_key
4
+ from agent.research_agent import ResearchAgent
5
+ from agent.toolkits import english_polishing
6
+ from statics.style import *
7
+
8
+
9
# Fail fast at import time if no OpenAI key is configured.
check_openai_api_key()
# Module-level UI state: accumulated <details> HTML and bookkeeping for the
# "Report History" / "Polishing History" accordions.
report_history_buffer = ""
report_history_num = 0
report_history_tasks = []
polish_history_buffer = ""
14
+
15
def run_agent(task, agent, report_type):
    """Record the task in the history globals and stream the generated report.

    Args:
        task (str): The research question entered in the UI.
        agent (str): The agent type selected in the UI.
        report_type (str): The report style selected in the UI.

    Yields:
        str: Accumulated report text for live UI updates.
    """
    global report_history_num, report_history_tasks
    report_history_num += 1
    report_history_tasks.append(task)
    researcher = ResearchAgent(task, agent)
    yield from researcher.write_report(report_type)
21
+
22
+
23
# Gradio UI: three tabs — report generation, English polishing, and a
# literature-review placeholder. `css`, `top_bar`, `report_html`,
# `english_polishing_html` and `literature_review_html` come from
# statics.style (star-imported above).
with gr.Blocks(theme=gr.themes.Base(),
               title="AI Research Assistant",
               css=css) as demo:
    gr.HTML(top_bar)
    with gr.Tab(label="🔦Report"):
        with gr.Column():
            gr.HTML(report_html)
            # Markdown area that receives the streamed report text.
            report = gr.Markdown(value="&nbsp;&nbsp;Report will appear here...",
                                 elem_classes="output")
            with gr.Row():
                agent_type = gr.Dropdown(label="# Agent Type",
                                         value="Default Agent",
                                         interactive=True,
                                         allow_custom_value=False,
                                         choices=["Default Agent",
                                                  "Business Analyst Agent",
                                                  "Finance Agent",
                                                  "Travel Agent",
                                                  "Academic Research Agent",
                                                  "Computer Security Analyst Agent",
                                                  "Clinical Medicine Agent",
                                                  "Basic Medicine Agent",
                                                  "Social Science Research Agent"])
                report_type = gr.Dropdown(label="# Report Type",
                                          value="Research Report",
                                          interactive=True,
                                          allow_custom_value=False,
                                          choices=["Research Report",
                                                   "Resource Report",
                                                   "Outline Report"])

            input_box = gr.Textbox(label="# What would you like to research next?", placeholder="Enter your question here")
            submit_btn = gr.Button("Generate Report", elem_id="primary-btn")

            gr.Examples(["Should I invest in the Large Language Model industry in 2023?",
                         "Is it advisable to make investments in the electric car industry during the year 2023?",
                         "What constitutes the optimal approach for investing in the Bitcoin industry during the year 2023?",
                         "What are the most recent advancements in the domain of superconductors as of 2023?"],
                        inputs=input_box)

            with gr.Accordion(label="# Report History", elem_id="history", open=False):
                report_history = gr.Markdown()

            def store_report(content):
                # Wrap the finished report in a collapsible <details> section
                # and append it to the history accordion's markdown buffer.
                global report_history_num, report_history_tasks, report_history_buffer
                report_history_buffer += f'<details> \
                                           <summary>Research History {report_history_num}: \
                                           <i>{report_history_tasks[-1]}</i></summary> \
                                           <div id="history_box">{content}</div> \
                                           </details>'
                return report_history_buffer

            # Stream the report into `report`, then archive it in the history.
            submit_btn.click(run_agent, inputs=[input_box, agent_type, report_type], outputs=report)\
                .then(store_report, inputs=[report], outputs=report_history)

    with gr.Tab("✒️English Polishing"):
        gr.HTML(english_polishing_html)
        polished_result = gr.Markdown("&nbsp;&nbsp;Polished result will appear here...", elem_classes="output")
        sentences = gr.Textbox(label="# What would you like to polish?", placeholder="Enter your sentence here")

        with gr.Row():
            polish_btn = gr.Button("Polish", elem_id="primary-btn")

        with gr.Accordion(label="# Polishing History", elem_id="history", open=False):
            polish_history = gr.Markdown()

        def store_polished_result(origin, result):
            # Archive one original/polished sentence pair in the history.
            global polish_history_buffer
            polish_history_buffer += f'<details> \
                                       <summary><i>{origin}</i></summary> \
                                       <div id="history_box">{result}</div> \
                                       </details>'
            return polish_history_buffer

        # Stream the polish into `polished_result`, then archive the pair.
        polish_btn.click(english_polishing, inputs=[sentences], outputs=polished_result) \
            .then(store_polished_result, inputs=[sentences, polished_result], outputs=polish_history)

    with gr.Tab("📑Literature Review"):
        # Placeholder tab — no functionality wired up yet (see README TODO).
        gr.HTML(literature_review_html)

# queue() is required for the generator (streaming) callbacks above.
demo.queue().launch()
config/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
"""Public interface of the ``config`` package.

Re-exports the configuration singleton machinery so callers can simply do
``from config import Config``.
"""
from config.config import Config, check_openai_api_key
from config.singleton import AbstractSingleton, Singleton

__all__ = [
    "check_openai_api_key",
    "AbstractSingleton",
    "Config",
    "Singleton",
]
config/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (407 Bytes). View file
 
config/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (334 Bytes). View file
 
config/__pycache__/config.cpython-311.pyc ADDED
Binary file (5.13 kB). View file
 
config/__pycache__/config.cpython-39.pyc ADDED
Binary file (3.51 kB). View file
 
config/__pycache__/singleton.cpython-311.pyc ADDED
Binary file (1.46 kB). View file
 
config/__pycache__/singleton.cpython-39.pyc ADDED
Binary file (1.04 kB). View file
 
config/config.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Configuration class to store the state of bools for different scripts access."""
2
+ import os
3
+
4
+ import openai
5
+ from colorama import Fore
6
+ from dotenv import load_dotenv
7
+
8
+ from config.singleton import Singleton
9
+
10
+ load_dotenv(verbose=True)
11
+
12
+
13
class Config(metaclass=Singleton):
    """Process-wide configuration singleton.

    Every value is sourced from an environment variable (loaded from ``.env``
    at import time) with a sensible default, and can be overridden later via
    the ``set_*`` methods.
    """

    def __init__(self) -> None:
        """Read all settings from the environment."""
        env = os.getenv

        # Runtime flags.
        self.debug_mode = False
        self.allow_downloads = False

        # Browsing / scraping.
        self.selenium_web_browser = env("USE_WEB_BROWSER", "chrome")
        self.browse_chunk_max_length = int(env("BROWSE_CHUNK_MAX_LENGTH", 8192))
        self.user_agent = env(
            "USER_AGENT",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        )

        # LLM selection and token budgets.
        self.fast_llm_model = env("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = env("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(env("FAST_TOKEN_LIMIT", 8000))
        self.smart_token_limit = int(env("SMART_TOKEN_LIMIT", 8000))

        # OpenAI credentials and sampling.
        self.openai_api_key = env("OPENAI_API_KEY")
        self.openai_api_base = env("OPENAI_API_BASE", openai.api_base)
        self.temperature = float(env("TEMPERATURE", "1"))

        self.memory_backend = env("MEMORY_BACKEND", "local")
        # Wire the key into the OpenAI client immediately.
        openai.api_key = self.openai_api_key

    def set_fast_llm_model(self, value: str) -> None:
        """Override the fast LLM model name."""
        self.fast_llm_model = value

    def set_smart_llm_model(self, value: str) -> None:
        """Override the smart LLM model name."""
        self.smart_llm_model = value

    def set_fast_token_limit(self, value: int) -> None:
        """Override the fast LLM token limit."""
        self.fast_token_limit = value

    def set_smart_token_limit(self, value: int) -> None:
        """Override the smart LLM token limit."""
        self.smart_token_limit = value

    def set_browse_chunk_max_length(self, value: int) -> None:
        """Override the max chunk length used by the browse_website command."""
        self.browse_chunk_max_length = value

    def set_openai_api_key(self, value: str) -> None:
        """Override the OpenAI API key."""
        self.openai_api_key = value

    def set_debug_mode(self, value: bool) -> None:
        """Toggle debug mode."""
        self.debug_mode = value
71
+
72
+
73
def check_openai_api_key() -> None:
    """Verify that an OpenAI API key is configured; abort the process if not.

    Prints a red hint pointing at ``.env`` / the environment, then exits.

    Raises:
        SystemExit: with status 1 when no key is set.
    """
    cfg = Config()
    if not cfg.openai_api_key:
        print(
            Fore.RED
            + "Please set your OpenAI API key in .env or as an environment variable."
        )
        print("You can get your key from https://platform.openai.com/account/api-keys")
        # raise SystemExit instead of the exit() helper: exit() is injected by
        # the `site` module and is not guaranteed to exist in all runtimes.
        raise SystemExit(1)
config/singleton.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The singleton metaclass for ensuring only one instance of a class."""
2
+ import abc
3
+
4
+
5
class Singleton(abc.ABCMeta, type):
    """Metaclass that hands out a single shared instance per class.

    The first call constructs the instance; every later call returns the
    cached one.
    """

    # Shared cache mapping each class to its sole instance.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Return the cached instance, creating it on first use."""
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
17
+
18
+
19
class AbstractSingleton(abc.ABC, metaclass=Singleton):
    """Abstract base class whose concrete subclasses are singletons.

    Combines ``abc.ABC`` (abstract-method support) with the
    :class:`Singleton` metaclass (one shared instance per class).
    """
outputs/Is Apple stock stable/research--1807787156852143573.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ [{"title": "Apple Inc. (AAPL) Stock Historical Prices & Data - Yahoo Finance", "href": "https://finance.yahoo.com/quote/AAPL/history", "body": "Discover historical prices for AAPL stock on Yahoo Finance. View daily, weekly or monthly format back to when Apple Inc. stock was issued."}, {"title": "Apple - 43 Year Stock Price History | AAPL | MacroTrends", "href": "https://www.macrotrends.net/stocks/charts/AAPL/apple/stock-price-history", "body": "Market Cap. Historical daily share price chart and data for Apple since 1980 adjusted for splits and dividends. The latest closing stock price for Apple as of August 02, 2023 is 192.58. The all-time high Apple stock closing price was 196.45 on July 31, 2023. The Apple 52-week high stock price is 198.23, which is 2.9% above the current share price."}, {"title": "Apple (AAPL) Historical Data | Nasdaq", "href": "https://www.nasdaq.com/market-activity/stocks/aapl/historical", "body": "Back to AAPL Overview. Get up to 10 years of daily historical stock prices & volumes. The \"Close/Last\" is the \"adjust consolidated close price\". Data provided by Edgar Online . The net and ..."}]