Dhritiman Sagar committed
Commit 2689b43 · 0 Parent(s)

Added startup files
.gitattributes ADDED
@@ -0,0 +1 @@
+ *.sqlite filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.9
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+ COPY --chown=user . $HOME/app
+ COPY ./requirements.txt $HOME/app/requirements.txt
+ RUN pip install -r requirements.txt
+ COPY . .
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,14 @@
+ # AI Ethics Bot 🚀🤖
+
+ Hello, visitor! I'm a helpful bot designed to calm your anxieties about AI Safety. I understand that you may be concerned about the implications of AI. Perhaps you're looking for guidance on how to build ethical and safe AI systems.
+
+ Have no fear! Various organizations have put a lot of thought into this. Some useful links are below:
+
+ - [Blueprint for an AI Bill of Rights](https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf): A document published by the White House's Office of Science and Technology Policy.
+ - [NIST AI Risk Management Framework](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf): A framework for managing risk in AI systems, published by the National Institute of Standards and Technology.
+
+ I've indexed these documents and am ready to answer any questions you may have.
+
+ To begin, install the dependencies: `pip install -r requirements.txt`
+
+ Then run `chainlit run app.py`.
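
Beyond the two commands above, the app expects a few environment variables at runtime: app.py in this commit reads QDRANT_DB and QDRANT_API_KEY, and the OpenAI clients need OPENAI_API_KEY. A minimal pre-flight sketch follows (not part of the commit; the check script itself is an assumption), mirroring app.py's use of python-dotenv:

```python
# Hypothetical pre-flight check (not part of this commit): confirm the
# environment variables that app.py relies on before launching Chainlit.
import os

from dotenv import load_dotenv

load_dotenv()  # app.py loads the same .env file at import time

required = ["OPENAI_API_KEY", "QDRANT_DB", "QDRANT_API_KEY"]
missing = [name for name in required if not os.environ.get(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
print("Environment looks good; start the app with `chainlit run app.py`.")
```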
ai_ethics_te3/collection/ai_ethics_te3/storage.sqlite ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:950502e99e34e0a9d3fa5924a93768f0c4642d4ef834907fa632fdc5ed5f05a5
+ size 4833280
ai_ethics_te3/meta.json ADDED
@@ -0,0 +1 @@
+ {"collections": {"ai_ethics_te3": {"vectors": {"size": 1536, "distance": "Cosine", "hnsw_config": null, "quantization_config": null, "on_disk": null, "datatype": null, "multivector_config": null}, "shard_number": null, "sharding_method": null, "replication_factor": null, "write_consistency_factor": null, "on_disk_payload": null, "hnsw_config": null, "wal_config": null, "optimizers_config": null, "init_from": null, "quantization_config": null, "sparse_vectors": null}}, "aliases": {}}
app.py ADDED
@@ -0,0 +1,88 @@
+ import tiktoken
+ import os
+ from qdrant_client import QdrantClient
+ from langchain_qdrant import QdrantVectorStore
+ from qdrant_client.http.models import Distance, VectorParams
+ from typing import List
+ from chainlit.types import AskFileResponse
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_community.document_loaders import PyMuPDFLoader
+ from langchain_community.vectorstores import Qdrant
+ from langchain_openai.llms import OpenAI
+ from langchain_openai.chat_models import ChatOpenAI
+ from langchain_openai.embeddings import OpenAIEmbeddings
+ from langchain.chains.summarize import load_summarize_chain
+ from langchain.chains.conversation.memory import ConversationSummaryBufferMemory
+ from langchain.chains.conversation.base import ConversationChain
+ from langchain_core.runnables import RunnableParallel, RunnablePassthrough
+ from langchain_core.messages import SystemMessage, AIMessage, HumanMessage
+ from langchain_core.prompts import (ChatMessagePromptTemplate, SystemMessagePromptTemplate,
+                                     AIMessagePromptTemplate, HumanMessagePromptTemplate)
+ from langchain_community.chat_message_histories import ChatMessageHistory
+ from langchain_core.chat_history import BaseChatMessageHistory
+ from langchain_core.runnables.history import RunnableWithMessageHistory
+ from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
+ from langchain_core.pydantic_v1 import BaseModel, Field
+ from langchain.output_parsers import PydanticOutputParser
+ from langchain.output_parsers import OutputFixingParser
+ import chainlit as cl
+
+ from dotenv import load_dotenv; _ = load_dotenv()
+
+ RAG_PROMPT = """
+ Please answer the question below using the provided context. If the question cannot be answered
+ using the context, politely state that you can't answer that question.
+
+ Question:
+ {question}
+
+ Context:
+ {context}
+ """
+
+ def get_rag_chain():
+     """Builds a simple RAG chain over the hosted Qdrant collection."""
+     prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
+     embedding = OpenAIEmbeddings(model='text-embedding-3-small')
+     retriever = QdrantVectorStore.from_existing_collection(
+         collection_name='ai_ethics_te3_small',
+         embedding=embedding,
+         url=os.environ.get('QDRANT_DB'),
+         api_key=os.environ.get('QDRANT_API_KEY')
+     ).as_retriever()
+     llm = ChatOpenAI(model='gpt-4o', temperature=0)
+     rag_chain = ({'context': retriever, 'question': RunnablePassthrough()}
+                  | prompt
+                  | llm)
+     return rag_chain
+
+
+ @cl.on_chat_start
+ async def on_chat_start():
+     """Initializes the application at the start of a chat session."""
+     msg = cl.Message(
+         content="", disable_human_feedback=True
+     )
+     await msg.send()
+     chain = get_rag_chain()
+     # Let the user know that the system is ready
+     msg.content = """
+     I'm ready to answer any of your questions about the [Blueprint for an AI Bill of Rights](https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf)
+     and the [NIST AI Risk Management Framework](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf).
+     Ask away!
+     """
+     await msg.update()
+
+     cl.user_session.set("chain", chain)
+
+
+ @cl.on_message
+ async def main(message):
+     """Runs on every user message."""
+     chain = cl.user_session.get("chain")
+
+     msg = cl.Message(content="")
+     async for resp in chain.astream(message.content):
+         await msg.stream_token(resp.content)
+
+     await msg.send()
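
For a quick smoke test of the retrieval chain outside Chainlit, the sketch below (not part of this commit) imports `get_rag_chain` from app.py and invokes it directly. It assumes the same environment variables are set and that importing app.py outside a Chainlit run only registers the callbacks; the example question is illustrative.

```python
# Hypothetical smoke test (not part of this commit): exercise the RAG chain
# from app.py without starting the Chainlit server.
from app import get_rag_chain

chain = get_rag_chain()
# The chain takes a plain question string; the retriever supplies {context}
# and RunnablePassthrough forwards the question into the prompt.
response = chain.invoke("What does the Blueprint for an AI Bill of Rights cover?")
print(response.content)  # ChatOpenAI returns an AIMessage; .content holds the answer text
```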
chainlit.md ADDED
@@ -0,0 +1,10 @@
+ # AI Ethics Bot 🚀🤖
+
+ Hello, visitor! I'm a helpful bot designed to calm your anxieties about AI Safety. I understand that you may be concerned about the implications of AI. Perhaps you're looking for guidance on how to build ethical and safe AI systems.
+
+ Have no fear! Various organizations have put a lot of thought into this. Some useful links are below:
+
+ - [Blueprint for an AI Bill of Rights](https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf): A document published by the White House's Office of Science and Technology Policy.
+ - [NIST AI Risk Management Framework](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf): A framework for managing risk in AI systems, published by the National Institute of Standards and Technology.
+
+ I've indexed these documents and am ready to answer any questions you may have.
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ tiktoken
+ numpy
+ chainlit==0.7.700
+ openai
+ langchain
+ langchain-core
+ langchain-community
+ langchain-openai
+ langchain-qdrant
+ langchain-huggingface