Merge pull request #3 from MachineLearningReply/other/AI-audit-agent
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
app.py CHANGED
@@ -38,7 +38,7 @@ def manage_files(modal, document_store):
     if modal.is_open():
         with modal.container():
             uploaded_file = st.file_uploader(
-                "Upload a
+                "Upload a document in PDF format",
                 type=("pdf",),
                 on_change=new_file(),
                 disabled=st.session_state['document_qa_model'] is None,
@@ -57,7 +57,7 @@ def manage_files(modal, document_store):
         if uploaded_file:
             st.session_state['file_uploaded'] = True
             st.session_state['files'] = pd.concat([st.session_state['files'], edited_df])
-            with st.spinner('Processing the
+            with st.spinner('Processing the document content...'):
                 store_file_in_table(document_store, uploaded_file)
                 ingest_document(uploaded_file)

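One review note on the unchanged context lines: Streamlit's `on_change` parameter expects a callable, so `on_change=new_file()` invokes the handler once at render time and registers its return value (`None`) as the callback; `on_change=new_file` is the usual form. A minimal sketch of the upload flow these two hunks touch, assuming the handler name and session keys from the diff (the handler body and the commented-out calls are illustrative):

import streamlit as st

def new_file():
    # Reset per-file state when a new upload arrives
    # (handler name from the diff; this body is illustrative).
    st.session_state['file_uploaded'] = False

uploaded_file = st.file_uploader(
    "Upload a document in PDF format",
    type=("pdf",),
    on_change=new_file,  # pass the callable itself, not new_file()
    disabled=st.session_state.get('document_qa_model') is None,
)

if uploaded_file:
    st.session_state['file_uploaded'] = True
    with st.spinner('Processing the document content...'):
        # store_file_in_table(document_store, uploaded_file)
        # ingest_document(uploaded_file)
        pass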
@@ -103,7 +103,7 @@ def init_session_state():

 def set_page_config():
     st.set_page_config(
-        page_title="
+        page_title="AI Audit Assistant",
         page_icon=":shark:",
         initial_sidebar_state="expanded",
         layout="wide",
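Worth noting for the retitled page: `st.set_page_config` must be the first Streamlit command the script executes, otherwise Streamlit raises an error, so the new title only takes effect if this call stays ahead of any other `st.*` call. A minimal sketch using the values from the diff:

import streamlit as st

# Must be the first Streamlit command executed in the app script.
st.set_page_config(
    page_title="AI Audit Assistant",
    page_icon=":shark:",
    initial_sidebar_state="expanded",
    layout="wide",
)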
document_qa_engine.py CHANGED
@@ -76,12 +76,13 @@ def create_inference_pipeline(document_store, model_name, api_key):
         generator = OpenAIChatGenerator(api_key=Secret.from_token("<local LLM doesn't need an API key>"),
                                         model=model_name,
                                         api_base_url="http://localhost:1234/v1",
-                                        generation_kwargs={"max_tokens": MAX_TOKENS}
+                                        generation_kwargs={"max_tokens": MAX_TOKENS},
                                         )
     elif "gpt" in model_name:
         generator = OpenAIChatGenerator(api_key=Secret.from_token(api_key), model=model_name,
-                                        generation_kwargs={"max_tokens": MAX_TOKENS},
+                                        generation_kwargs={"max_tokens": MAX_TOKENS, "temperature": 0},
                                         streaming_callback=lambda chunk: print(chunk.content, end="", flush=True),
+
                                         )
     else:
         generator = HuggingFaceTGIChatGenerator(token=Secret.from_token(api_key), model=model_name,
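The substantive change in this hunk is pinning `temperature: 0` for the hosted GPT branch, which makes answers close to deterministic across repeated audit queries; the local branch's `http://localhost:1234/v1` base URL matches the default address of OpenAI-compatible local servers such as LM Studio. A minimal sketch of how the configured generator is exercised, assuming Haystack 2.x (`haystack-ai`); the model name, prompt, and API key are illustrative:

from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage
from haystack.utils import Secret

MAX_TOKENS = 1024  # illustrative; the repo defines its own constant

generator = OpenAIChatGenerator(
    api_key=Secret.from_token("sk-..."),  # hypothetical key
    model="gpt-4",
    # temperature=0 trades sampling diversity for repeatable audit answers
    generation_kwargs={"max_tokens": MAX_TOKENS, "temperature": 0},
)

result = generator.run(messages=[ChatMessage.from_user("Summarize the repo.")])
print(result["replies"][0].content)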
@@ -118,7 +119,7 @@ class DocumentQAEngine:

     def inference(self, query, input_messages: List[dict]):
         system_message = ChatMessage.from_system(
-            "You are a professional
+            "You are a professional analyzer of git repos, having access to the repo content. In 1-3 sentences")
         messages = [system_message]
         for message in input_messages:
             if message["role"] == "user":
@@ -127,7 +128,7 @@ class DocumentQAEngine:
         messages.append(
             ChatMessage.from_user(message["content"]))
         messages.append(ChatMessage.from_user("""
-            Relevant information from the uploaded
+            Relevant information from the uploaded repo:
        {% for doc in documents %}
            {{ doc.content }}
        {% endfor %}
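The `{% for doc in documents %}` block is a Jinja loop, so this user message is a template that has to be rendered with the retrieved documents before it reaches the model. A sketch of how such a template is typically filled in Haystack 2.x, assuming the pipeline renders it with `ChatPromptBuilder` (the repo may wire this differently; the documents and the trailing question line are illustrative):

from haystack.components.builders import ChatPromptBuilder
from haystack.dataclasses import ChatMessage, Document

template = [ChatMessage.from_user("""
Relevant information from the uploaded repo:
{% for doc in documents %}
    {{ doc.content }}
{% endfor %}
Question: {{ query }}
""")]

builder = ChatPromptBuilder(template=template)
docs = [Document(content="README: this repo builds an AI audit assistant.")]
result = builder.run(documents=docs, query="What does the repo do?")
# result["prompt"] holds the rendered ChatMessage list to pass to the generator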
utils.py CHANGED
@@ -50,7 +50,7 @@ def append_documentation_to_sidebar():
     with st.expander("Documentation"):
         st.markdown(
             """
-            Upload
+            Upload document as PDF document. Once the spinner stops, you can proceed to ask your questions. The answers will
             be displayed in the right column. The system will answer your questions using the content of the document
             and mark refrences over the PDF viewer.
             """)