Committed on
Commit
901c6a4
·
1 Parent(s): 7c2b202

init commit

Browse files
.gitattributes CHANGED
@@ -1,35 +1,5 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ assets/socrates-avatar.jpg filter=lfs diff=lfs merge=lfs -text
2
+ assets/theatetus-avatar.png filter=lfs diff=lfs merge=lfs -text
3
+ assets/agent-avatar.jpg filter=lfs diff=lfs merge=lfs -text
4
+ assets/intro-trio.jpg filter=lfs diff=lfs merge=lfs -text
5
+ assets/plato-avatar.jpg filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # flask
86
+ flask_session
87
+ *.log
88
+ datasets/
89
+
90
+ # pyenv
91
+ # For a library or package, you might want to ignore these files since the code is
92
+ # intended to run in multiple environments; otherwise, check them in:
93
+ # .python-version
94
+
95
+ # pipenv
96
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
97
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
98
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
99
+ # install all needed dependencies.
100
+ #Pipfile.lock
101
+
102
+ # poetry
103
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
104
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
105
+ # commonly ignored for libraries.
106
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
107
+ #poetry.lock
108
+
109
+ # pdm
110
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
111
+ #pdm.lock
112
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
113
+ # in version control.
114
+ # https://pdm.fming.dev/#use-with-ide
115
+ .pdm.toml
116
+
117
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
118
+ __pypackages__/
119
+
120
+ # Celery stuff
121
+ celerybeat-schedule
122
+ celerybeat.pid
123
+
124
+ # SageMath parsed files
125
+ *.sage.py
126
+
127
+ # Environments
128
+ .env
129
+ .venv
130
+ env/
131
+ venv/
132
+ ENV/
133
+ env.bak/
134
+ venv.bak/
135
+
136
+ # Spyder project settings
137
+ .spyderproject
138
+ .spyproject
139
+
140
+ # Rope project settings
141
+ .ropeproject
142
+
143
+ # mkdocs documentation
144
+ /site
145
+
146
+ # mypy
147
+ .mypy_cache/
148
+ .dmypy.json
149
+ dmypy.json
150
+
151
+ # Pyre type checker
152
+ .pyre/
153
+
154
+ # pytype static type analyzer
155
+ .pytype/
156
+
157
+ # Cython debug symbols
158
+ cython_debug/
159
+
160
+ # PyCharm
161
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
162
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
163
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
164
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
165
+ .idea/
166
+ .streamlit/secrets.toml
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [theme]
2
+ base="light"
3
+ font="sans serif"
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Runzhe Yang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -10,4 +10,33 @@ pinned: false
10
  license: mit
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  license: mit
11
  ---
12
 
13
+ # Socratiq
14
+
15
+ Socratiq is an application that utilizes OpenAI's GPT model to simulate a Socratic dialogue. Inspired by the SocraticAI project by Princeton NLP, it aims to facilitate an interactive learning environment where the user can explore complex questions through an engaging, back-and-forth conversation.
16
+ Many thanks to Runzhe Yang and Karthik Narasimhan for their insightful article https://princeton-nlp.github.io/SocraticAI/.
17
+
18
+ ## Features
19
+
20
+ - **Interactive Dialogue**: Engage with the AI in a conversational format, where the AI plays the roles of Socrates, Theaetetus, and Plato, thus bringing a multi-perspective approach to problem-solving.
21
+
22
+ - **Session Management**: The application maintains the state of your conversation across the session, allowing the dialogue to unfold naturally.
23
+
24
+ - **User-friendly Interface**: Built with Streamlit, the application provides a clean and straightforward interface for user interactions.
25
+
26
+ ## Setup
27
+
28
+ 1. Clone the repository.
29
+ 2. Install the necessary dependencies using `pip install -r requirements.txt`.
30
+ 3. Run the app locally using `streamlit run app.py`.
31
+ 4. Put your OpenAI, Crawling and Metaphor API keys in .streamlit/secrets.toml file.
32
+
33
+ ## Usage
34
+
35
+ After setting up the application, you can start asking questions to the AI. The AI, acting as Socrates, Theaetetus, and Plato, will collaboratively attempt to answer your questions, ask clarifying queries, or provide deeper insights.
36
+
37
+ Please note that this is a conversational AI, and the quality of the responses will depend on the capabilities of the underlying GPT-3/GPT-4 models.
38
+
39
+ ## Limitations and Disclaimer
40
+
41
+ While the application aims to provide informative and engaging dialogues, it's important to note that the AI's responses are generated based on pre-existing knowledge and may not always reflect the most current or accurate information. Always cross-check critical information with other sources.
42
+
agent.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.agents import OpenAIFunctionsAgent
2
+ from langchain.schema import SystemMessage
3
+ from langchain.agents import AgentExecutor
4
+ from langchain.tools.base import ToolException
5
+
6
+
7
class Agent:
    """Thin wrapper around a LangChain OpenAI-functions agent.

    Installs a per-tool error handler on every tool and exposes a single
    `run(query)` entry point that never raises.
    """

    def __init__(self, llm, tools, max_iterations=15):
        for tool in tools:
            # Bind the current tool as a default argument. The original
            # closure captured the loop variable, so every handler reported
            # the name of the *last* tool in the list (late-binding bug).
            def _handle_error(error: ToolException, _tool=tool) -> str:
                return (
                    f"The following errors occurred during tool '{_tool.name}' execution:"
                    + error.args[0]
                    + "Please try another tool."
                )

            tool.handle_tool_error = _handle_error
        self._tools = tools
        system_message = SystemMessage(
            content="You are a web researcher who uses search engines to look up information.")
        prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
        agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True,
                                            max_iterations=max_iterations)

    def run(self, query):
        """Run the agent on `query`; return its answer, or an error message
        as plain text so the surrounding dialog can continue."""
        try:
            return self.agent_executor.run(query)
        except Exception as e:
            # Grammar fixed ("encounter" -> "encountered") in the user-visible message.
            return f"Agent encountered an error.\n\n Error: {str(e)}"
app.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from socratic import *
3
+ from tool import get_tools
4
+ from agent import Agent
5
+ from common import get_llm
6
+ import sys
7
+ import time
8
+
9
logging.basicConfig(stream=sys.stdout, level=logging.INFO)  # `logging` comes in via `from socratic import *`

# --- APPLICATION ---

PAGE_TITLE: str = "Socratiq"
PAGE_ICON: str = "🗨️"
N_ROUND: int = 50  # max dialog rounds handed to each SocraticGPT participant

st.set_page_config(page_title=PAGE_TITLE, page_icon=PAGE_ICON)

# The OpenAI key comes from Streamlit secrets and is cached in session state
# so the participants can be (re)built from it in init_session().
open_api_key = st.secrets["OPENAI_API_KEY"]
st.session_state.key = open_api_key
21
+
22
+
23
def init_session() -> None:
    """Build the web-research agent and the three dialog participants, and
    reset every piece of per-session state. Runs once per fresh session."""
    tools = get_tools()
    st.session_state.agent = Agent(get_llm(model_name='gpt-4', model_temperature=0, api_key=st.session_state.key),
                                   tools)
    # Socrates and Theaetetus do the solving; Plato proofreads (pinned to gpt-4).
    st.session_state.socrates = SocraticGPT(role=SOCRATES, tools=st.session_state.agent._tools,
                                            key=st.session_state.key, n_round=N_ROUND)
    st.session_state.theaetetus = SocraticGPT(role=THEAETETUS, tools=st.session_state.agent._tools,
                                              key=st.session_state.key, n_round=N_ROUND)
    st.session_state.plato = SocraticGPT(role=PLATO, tools=st.session_state.agent._tools, key=st.session_state.key,
                                         n_round=N_ROUND, model="gpt-4")
    # Dialog bookkeeping: who speaks next, the transcript, and pending I/O.
    st.session_state.dialog_lead = None
    st.session_state.dialog_follower = None
    st.session_state.messages = []
    st.session_state.question = None
    st.session_state.user_input = None
    st.session_state.in_progress = False
    st.session_state.user_question = None
+
41
+
42
def get_random_question():
    """Return a randomly chosen starter question for the "QuestionRoll" button.

    The original implementation always returned the same hard-coded string,
    contradicting both the function's name and the button's promise of a
    random prompt; it is generalized to a small pool (original included).
    """
    import random  # local import: keeps the module's import block untouched

    questions = [
        "What is the size of the Moon?",
        "Why is the sky blue?",
        "How do vaccines work?",
        "What causes the seasons on Earth?",
        "Why do we dream?",
    ]
    return random.choice(questions)
44
+
45
+
46
+ def show_intro_screen():
47
+ st.header("Socratiq")
48
+ st.image("./assets/intro-trio.jpg")
49
+ description = """\
50
+ You ask, 'What's the meaning of life?' and our trio of digital philosophers fetch real-time
51
+ wisdom faster than you can say 'Immanuel Kant.' Whether you’re curious about science, or even the nuances of
52
+ modern art, Soratiq has you covered. Your feedback shapes the conversation, making it an educational journey
53
+ tailored just for you. So why settle for small talk? Dive into Soratiq today and elevate your discourse!
54
+ """
55
+ st.caption(description)
56
+ st.divider()
57
+
58
+ if st.session_state.question is None:
59
+ question = st.text_input(label='Paste your question. E.g. "What is the size of the moon?"',
60
+ label_visibility='collapsed',
61
+ placeholder='Paste your question. E.g. "What is the size of the moon?"')
62
+ col1, _, _, _, col2 = st.columns(5)
63
+ if col1.button(label="Ask Away!", help="Push the button and dive into the dialog around your question..."):
64
+ if not len(question) > 0:
65
+ st.warning(
66
+ "Whoops! That question seems to be doing the vanishing act. Could you give it another shot? Magic words: 'Valid question, Please!' 🪄")
67
+ if len(question) > 0:
68
+ set_user_question(question)
69
+
70
+ if col2.button(label="QuestionRoll", help="The button generates a random question for the user to ponder or discuss. This should be a fun and engaging experience, sparking curiosity."):
71
+ question = get_random_question()
72
+ set_user_question(question)
73
+ else:
74
+ if st.session_state.question is not None:
75
+ st.subheader(f"*{st.session_state.question}*")
76
+ st.divider()
77
+
78
+
79
+
80
def set_user_question(question):
    """Store `question` in session state, hand it to all three participants,
    and restart the script so the chat view takes over."""
    st.session_state.question = question
    st.session_state.socrates.set_question(question)
    st.session_state.theaetetus.set_question(question)
    st.session_state.plato.set_question(question)
    st.experimental_rerun()
86
+
87
+
88
# Fresh session: `question` only exists after init_session() has run once.
if 'question' not in st.session_state:
    init_session()
90
+
91
def get_avatar(role):
    """Map a message role to its chat avatar (image path or builtin icon).

    Unknown roles yield None, matching the original fall-through behavior.
    """
    avatars = {
        SOCRATES: "./assets/socrates-avatar.jpg",
        THEAETETUS: "./assets/theatetus-avatar.png",
        PLATO: "./assets/plato-avatar.jpg",
        'agent': "ai",
        'system': "./assets/agent-avatar.jpg",
        'user': "user",
    }
    return avatars.get(role)
104
+
105
def get_role(role):
    """Collapse app-specific roles onto Streamlit's two chat roles:
    'agent' and 'system' render as the assistant, everything else as the user."""
    return 'ai' if role in ('agent', 'system') else 'user'
112
+
113
def show_chat() -> None:
    """Re-render the whole transcript from session state on each rerun."""
    for entry in st.session_state.messages or []:
        role, content = entry['role'], entry['content']
        with st.chat_message(get_role(role), avatar=get_avatar(role)):
            # System notices are rendered in italics; everything else as-is.
            st.markdown("*" + content + "*" if role == 'system' else content)
123
+
124
+
125
def add_message(role, content):
    """Append a message to the transcript and render it immediately."""
    st.session_state.messages.append({"role": role, "content": content})
    with st.chat_message(get_role(role), avatar=get_avatar(role)):
        st.markdown(content)
129
+
130
+
131
def chat_input():
    """Render the chat input box.

    Enabled only while the dialog is waiting for user feedback
    (`user_question` is set); otherwise shown disabled mid-dialog so the
    layout stays stable.
    """
    if st.session_state.user_question is not None:
        prompt = st.chat_input("Share your wisdom...")
        if prompt:
            st.session_state.user_input = prompt
    elif st.session_state.question is not None:
        st.chat_input("...", disabled=True)
138
+
139
def main() -> None:
    """Drive one step of the Socratic dialog per Streamlit rerun.

    Fixes over the original: the two user-facing prompts had grammatical
    errors ("Is that correct answer?", "are you agree"), the stored
    follow-up question embedded a Python list repr, a placeholder-free
    f-string and a commented-out call were removed.
    """
    show_intro_screen()
    chat_input()
    show_chat()

    # A question is set and we are not waiting on the user: advance the dialog.
    if st.session_state.question is not None and st.session_state.user_question is None:
        if not st.session_state.in_progress:
            # First pass after the question was set: fix the speaking order.
            st.session_state.in_progress = True
            st.session_state.dialog_lead, st.session_state.dialog_follower = st.session_state.socrates, st.session_state.theaetetus
        else:
            with st.spinner(f"{st.session_state.dialog_follower.role} is thinking..."):
                rep = st.session_state.dialog_follower.get_response()
                add_message(st.session_state.dialog_follower.role, f"{st.session_state.dialog_follower.role}: " + rep)
                st.session_state.dialog_lead.update_history(rep)
                st.session_state.plato.update_history(f"{st.session_state.dialog_follower.role}: " + rep)

            # next round the opponent answers
            st.session_state.dialog_lead, st.session_state.dialog_follower = st.session_state.dialog_follower, st.session_state.dialog_lead

            # Scan the reply for the <answer>/<user>/<agent> control tags.
            answer = SocraticGPT.get_answer(rep)
            user_question = SocraticGPT.get_user_question(rep)
            agent_question = SocraticGPT.get_agent_question(rep)

            # Once Theaetetus has just spoken (he is now lead) and no tag
            # fired, let Plato proofread; his critique may itself raise tags.
            if st.session_state.dialog_lead.role == st.session_state.theaetetus.role:
                if user_question is None and agent_question is None and answer is None:
                    with st.spinner("thinking critically..."):
                        pr = st.session_state.plato.get_proofread()
                        if pr:
                            add_message(st.session_state.plato.role, f"{st.session_state.plato.role}: " + pr)
                            st.session_state.socrates.add_proofread(pr)
                            st.session_state.theaetetus.add_proofread(pr)
                            user_question = SocraticGPT.get_user_question(pr)  # Plato can suggest to use agent or get user feedback
                            agent_question = SocraticGPT.get_agent_question(pr)

            if agent_question:
                with st.status(f"Consulting the agent: ''' {agent_question} '''"):
                    agent_msg = st.session_state.agent.run(agent_question)  # TODO: agent status callback or status polling in a loop
                    st.session_state.socrates.add_agent_feedback(agent_question, agent_msg)
                    st.session_state.theaetetus.add_agent_feedback(agent_question, agent_msg)
                    st.session_state.plato.add_agent_feedback(agent_question, agent_msg)
                    add_message('agent', f"Agent: {agent_msg}")

            if user_question:
                st.session_state.user_question = user_question
                add_message('system', f'User feedback is required. The question: **"{" ".join(user_question)}"**')
                st.experimental_rerun()

            if answer:
                # Store plain text (the original embedded the list's repr).
                st.session_state.user_question = f'Is that the correct answer? - {" ".join(answer)}'
                add_message('system', f"""User, do you agree with the answer? - **{" ".join(answer)}**""")
                st.experimental_rerun()

    # Deliver any pending user reply to all three participants.
    if st.session_state.user_input is not None:
        user_input = st.session_state.user_input
        if st.session_state.user_question is not None:
            user_question = st.session_state.user_question
            st.session_state.socrates.add_user_feedback(user_question, user_input)
            st.session_state.theaetetus.add_user_feedback(user_question, user_input)
            st.session_state.plato.add_user_feedback(user_question, user_input)
            st.session_state.user_question = None
            add_message("user", f"{user_input}")

        st.session_state.user_input = None

    # Keep the dialog ticking: rerun roughly once per second while work remains.
    if st.session_state.question is not None and st.session_state.user_question is None:
        time.sleep(1)
        st.experimental_rerun()
208
+
209
+
210
# Streamlit re-executes this script top-to-bottom on every rerun; main()
# performs one step of the dialog per pass.
if __name__ == "__main__":
    main()


# TODO: publish/access dialog debug logs, so the user can dig into the details
# TODO: possible answers to the question - like 'double check your answer' or 'make the answer sound like a pirate' etc
assets/agent-avatar.jpg ADDED

Git LFS Details

  • SHA256: 6ef17ae0b35c56c5389b1eacbf9c344d75601a1b3357bfdbcc6a4fe1afb3d033
  • Pointer size: 131 Bytes
  • Size of remote file: 153 kB
assets/intro-trio.jpg ADDED

Git LFS Details

  • SHA256: 0fe3e1219ff7505c5ac595e07de63c5fdc985431dd499532b869bda9516af76c
  • Pointer size: 131 Bytes
  • Size of remote file: 326 kB
assets/plato-avatar.jpg ADDED

Git LFS Details

  • SHA256: d9461d6cbfc8f0ecb7934c6bfc087e5e05506dcd0138a567bebbb75c87264602
  • Pointer size: 131 Bytes
  • Size of remote file: 148 kB
assets/socrates-avatar.jpg ADDED

Git LFS Details

  • SHA256: 0bf782d7794bd454e8a33a634661cbd87f46e4477e62eab89d4c1d03b4865081
  • Pointer size: 131 Bytes
  • Size of remote file: 132 kB
assets/theatetus-avatar.png ADDED

Git LFS Details

  • SHA256: 1306a486886754920a72fbf3f59b2a9c50f50ce3bed37bd68f5b3a79ecdaf899
  • Pointer size: 132 Bytes
  • Size of remote file: 2.35 MB
common.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.chat_models import ChatOpenAI
2
+ from langchain.llms import OpenAI
3
+
4
def get_llm(model_name, model_temperature, api_key, max_tokens=None):
    """Build a LangChain LLM client for `model_name`.

    The legacy completion model "text-davinci-003" goes through `OpenAI`;
    every other model is assumed to be a chat model and uses `ChatOpenAI`.
    """
    llm_cls = OpenAI if model_name == "text-davinci-003" else ChatOpenAI
    return llm_cls(temperature=model_temperature, model_name=model_name,
                   max_tokens=max_tokens, openai_api_key=api_key)
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ openai~=0.27.10
2
+ streamlit~=1.26.0
3
+ langchain~=0.0.285
4
+ metaphor-python~=0.1.16
5
+ wikipedia~=1.4.0
6
+ crawlbase~=1.0.0
socratic.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.schema import (
2
+ AIMessage,
3
+ HumanMessage,
4
+ SystemMessage
5
+ )
6
+ from common import get_llm
7
+ import logging
8
+ import re
9
+
10
+ SOCRATES = "Socrates"
11
+ THEAETETUS = "Theaetetus"
12
+ PLATO = "Plato"
13
+ class SocraticGPT:
14
+ def __init__(self, role, tools, key, n_round=10, model="gpt-3.5-turbo-16k"):
15
+ self.role = role
16
+ self.model = model
17
+ self.n_round = n_round
18
+ self.tools = tools
19
+ self.key = key
20
+
21
+ if self.role == SOCRATES:
22
+ self.other_role = THEAETETUS
23
+ elif self.role == THEAETETUS:
24
+ self.other_role = SOCRATES
25
+
26
+ self.history = []
27
+
28
+ def set_question(self, question):
29
+ instruction_prompt = \
30
+ f"""
31
+ {SOCRATES} and {THEAETETUS} are two advanced AI assistants to solve challenging problems. {SOCRATES} and {THEAETETUS} will engage in multi-round dialogue to solve the problem together. There also another participant {PLATO}, who is acting like a proofreader providing valuable advices. {SOCRATES} and {THEAETETUS} have to listen and follow the advice.
32
+ Their discussion should follow a structured problem-solving approach, such as formalizing the problem, developing high-level strategies for solving the problem, using Agents if necessary, reusing sub-problem solutions where possible, critically evaluating each other's reasoning, avoiding arithmetic and logical errors, and effectively communicating their ideas.
33
+
34
+ They are permitted to consult with the user if they encounter any uncertainties or difficulties. Any responses from user will be provided in the following round. If the main question is not clear they have to seek advise from the user.
35
+ To ask the user use following phrase: <user>insert your question</user>.
36
+
37
+ There is an agent available for usage. agent is also an advanced AI, which can perform a comprehensive web search, information extration from web urls and navigate the Internet. The request to agent must mention your goal and what you want to achieve. Request must contain all the required information. The agent does not know about you dialog or memory of the previous requests, it only performs requested actions. To avoid rate limits, try to break down the request into smaller parts to gather the necessary information.
38
+ To call the agent use following phrase: <agent>insert your request</agent>.
39
+
40
+ Their ultimate objective is to come to a correct solution through reasoned discussion. To present their final answer, they should adhere to the following guidelines:
41
+ - State the problem they were asked to solve.
42
+ - Present any assumptions they made in their reasoning.
43
+ - Detail the logical steps they took to arrive at their final answer.
44
+ - Use the agent to perform specific operations.
45
+ - Asses critically your way of thinking. Apply critical thinking.
46
+ - Conclude with a final statement that directly answers the problem.
47
+
48
+ Their final answer should be concise and free from logical errors, such as false dichotomy, hasty generalization, and circular reasoning.
49
+ Immediately provide the answer if nobody has objections to the solution. If they encounter any issues with the validity of their answer, they should re-evaluate their reasoning and calculations. Before providing the final answer, every participant has to accept the solution or reject it with a clear explaination. Do not provide the answer if someone has reasonable objections to it.
50
+ The final answer should begin with the phrase: <answer>insert your answer</answer>.
51
+
52
+ The dialog must be formatted using Markdown.
53
+
54
+ The problem statement is as follows: ''' {question} '''.
55
+ """
56
+
57
+ # print(instruction_prompt)
58
+
59
+ if self.role == SOCRATES:
60
+ self.history.append(SystemMessage(
61
+ content=instruction_prompt + f"\nNow, suppose that you are {self.role}. Please discuss the problem with {self.other_role}!"))
62
+ self.history.append(AIMessage(
63
+ content=f"Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
64
+ ))
65
+ elif self.role == THEAETETUS:
66
+ self.history.append(SystemMessage(
67
+ content=instruction_prompt + f"\nNow, suppose that you are {self.role}. Please discuss the problem with {self.other_role}!"))
68
+ self.history.append(HumanMessage(
69
+ content=f"Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
70
+ ))
71
+ elif self.role == PLATO:
72
+ self.history.append(SystemMessage(
73
+ content=instruction_prompt + f"\nNow as a proofreader, {PLATO}, your task is to read through the dialogue between {SOCRATES} and {THEAETETUS} and identify any errors they made."))
74
+ self.history.append(HumanMessage(
75
+ content=f"{SOCRATES}: Hi {THEAETETUS}, let's solve this problem together. Please feel free to correct me if I make any mistakes."
76
+ ))
77
+
78
+ def get_response(self, temperature=0):
79
+ msg = self._call_llm(self.history, temperature)
80
+ logging.debug(f"response: {self.role} : {msg}")
81
+ self.history.append(AIMessage(content=msg))
82
+ return msg
83
+
84
+ def get_proofread(self, temperature=0, history_depth=7):
85
+ pf_template = HumanMessage(
86
+ content=f""" \
87
+ The above is the conversation between {SOCRATES} and {THEAETETUS} and the agent. They were likely to have made multiple mistakes or not follow guidelines or try inefficient way to solve the problem or incorrectly use the agent.
88
+ Asses critically the dialog above. Also asses critically the agent's answers. Explain your reasoning step by step. Be concise. Start your answer with "NO" if you have no reasonable suggestions for improvements, do not say "NO" otherwise.
89
+ """
90
+ )
91
+ # OpenAI token rate limiting prevent the model from answering - we are limiting the history
92
+ history = [self.history[0]] + self.history[-history_depth:] if len(self.history) > history_depth else self.history
93
+ msg = self._call_llm(history + [pf_template], temperature)
94
+ logging.debug(f"proofread: {self.role} : {msg}")
95
+ if msg[:2] in ["NO", "No", "no"]:
96
+ return None
97
+ else:
98
+ pattern = r'^YES\s*\n*'
99
+ msg = re.sub(pattern, '', msg)
100
+ self.history.append(AIMessage(content=f"Message from a proofreader {PLATO}: {msg}"))
101
+ return msg
102
+
103
+ def _call_llm(self, messages, temperature=0):
104
+ try:
105
+ llm = get_llm(model_name=self.model, model_temperature=temperature, api_key=self.key)
106
+ response = llm(messages)
107
+ msg = response.content
108
+ except Exception as e:
109
+ if "maximum context length" in str(e):
110
+ # Handle the maximum context length error here
111
+ msg = "The context length exceeds my limit... "
112
+ else:
113
+ # Handle other errors here
114
+ msg = f"I encounter an error when using my backend model.\n\n Error: {str(e)}"
115
+ return msg
116
+
117
+ def update_history(self, message):
118
+ self.history.append(HumanMessage(content=message))
119
+
120
+ def add_agent_feedback(self, question, answer):
121
+ self.history.append(AIMessage(content=f"Agents's feedback to \"{question}\" is \"{answer}\""))
122
+
123
+ def add_user_feedback(self, question, answer):
124
+ self.history.append(SystemMessage(content=f"User's feedback to \"{question}\" is \"{answer}\""))
125
+
126
+ def add_proofread(self, proofread):
127
+ self.history.append(SystemMessage(content=f"{PLATO}: Message from a proofreader: {proofread}"))
128
+
129
+ @staticmethod
130
+ def get_question(text, pattern):
131
+ matches = re.findall(pattern, text, re.DOTALL)
132
+
133
+ if len(matches) == 0:
134
+ return None
135
+
136
+ return matches
137
+
138
+ def get_user_question(text):
139
+ pattern = r"<user>(.*?)</user>"
140
+ return SocraticGPT.get_question(text, pattern)
141
+
142
+ @staticmethod
143
+ def get_agent_question(text):
144
+ pattern = r"<agent>(.*?)</agent>"
145
+ return SocraticGPT.get_question(text, pattern)
146
+
147
+ @staticmethod
148
+ def get_answer(text):
149
+ pattern = r"<answer>(.*?)</answer>"
150
+ return SocraticGPT.get_question(text, pattern)
151
+
152
+
153
+
154
+
155
+
156
+
tool.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from metaphor_python import Metaphor
2
+ from langchain.agents import tool
3
+ from typing import List, Optional
4
+ from langchain.tools.base import ToolException
5
+ from langchain.tools import WikipediaQueryRun
6
+ from langchain.utilities import WikipediaAPIWrapper
7
+ from crawlbase import CrawlingAPI
8
+ import streamlit as st
9
+
10
+
11
client = Metaphor(api_key=st.secrets["METAPHOR_API_KEY"])  # Metaphor search client; key read from Streamlit secrets
12
+
13
@tool
def search(query: str, num_results: Optional[int] = None, include_domains: Optional[List[str]] = None, exclude_domains: Optional[List[str]] = None):
    """Call search engine with a query """
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM agent (langchain's @tool reads it), so it is deliberately short.
    try:
        result = client.search(query, use_autoprompt=True, num_results=num_results, include_domains=include_domains, exclude_domains=exclude_domains)
        # Metaphor occasionally puts an apology in autoprompt_string instead
        # of results; surface it as a failure so the agent tries another tool.
        if "I'm sorry" in result.autoprompt_string:
            raise Exception(result.autoprompt_string)
        return result
    except Exception as e:
        # ToolException is routed to the per-tool error handler set in agent.py.
        raise ToolException(e.args[0])
23
+
24
+
25
@tool
def get_contents(ids: List[str]):
    """Get contents of a webpage. May return an empty content, it means you have to use another tool to get the content.

    The ids passed in should be a list of ids as fetched from `search`.
    """
    try:
        return client.get_contents(ids)
    except Exception as e:
        # Normalise Metaphor API failures into ToolException for the agent.
        raise ToolException(e.args[0])
35
+
36
+
37
@tool
def find_similar(url: str, num_results: Optional[int] = None, include_domains: Optional[List[str]] = None, exclude_domains: Optional[List[str]] = None):
    """Get search results similar to a given URL.

    The url passed in should be a URL returned from `search`
    """
    try:
        return client.find_similar(url, num_results=num_results, include_domains=include_domains, exclude_domains=exclude_domains)
    except Exception as e:
        # Normalise Metaphor API failures into ToolException for the agent.
        raise ToolException(e.args[0])
47
+
48
+
49
# Crawlbase client used by the scrape_page tools below; key from secrets.
crawling_api_key = st.secrets["CRAWLING_API_KEY"]
api = CrawlingAPI({'token': crawling_api_key})
51
+
52
+
53
@tool
def scrape_page(url: str):
    """Get content of a given URL to process by an agent. in a json format like: dict_keys(['alert', 'title', 'favicon', 'meta', 'content', 'canonical', 'images', 'grouped_images', 'og_images', 'links'])
    """
    # Crawlbase does the fetching; 'autoparse' returns structured JSON and
    # 'scroll' lets script-heavy pages render before capture.
    response = api.get(url, options={'format': 'json', 'autoparse': 'true', 'scroll': 'true'})
    content = response['json']
    return content
60
+
61
+
62
# TODO: list attributes to return directly like content_type=['alert', 'title', 'favicon', 'meta', 'content', 'canonical', 'images', 'grouped_images', 'og_images', 'links']]
@tool(return_direct=True)
def scrape_page_and_return_the_content_directly(url: str):
    """Use this tool to directly get content of a given URL without processing it. in a json format like: dict_keys(['alert', 'title', 'favicon', 'meta', 'content', 'canonical', 'images', 'grouped_images', 'og_images', 'links'])
    """
    # return_direct=True: the agent hands this payload back verbatim instead
    # of passing it through the LLM again.
    response = api.get(url, options={'format': 'json', 'autoparse': 'true', 'scroll': 'true'})
    content = response['json']
    return content
70
+
71
+
72
def get_tools():
    """Assemble the toolbox handed to the web-research agent."""
    wiki_tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
    # Nudge the agent towards Wikipedia for factual lookups.
    wiki_tool.description = wiki_tool.description + " Prioritise this tool if you want to learn about facts."
    toolbox = [
        scrape_page,
        search,
        get_contents,
        find_similar,
        wiki_tool,
        scrape_page_and_return_the_content_directly,
    ]
    return toolbox