Medical Document Assistant App with LLM RAG framework --YY
Files changed:
- .dockerignore +8 -0
- .gitattributes +37 -35
- .gitignore +8 -0
- Dockerfile +23 -0
- README.md +39 -12
- chainlit.md +14 -0
- data/The_GALE_ENCYCLOPEDIA_of_MEDICINE_SECOND.pdf +3 -0
- docker-compose.yml +8 -0
- ingest.py +28 -0
- llama-2-7b-chat.ggmlv3.q8_0.bin +3 -0
- model.py +110 -0
- requirements.txt +25 -0
- vectorstore/db_faiss/index.faiss +3 -0
- vectorstore/db_faiss/index.pkl +3 -0
.dockerignore
ADDED
@@ -0,0 +1,8 @@
+.venv
+__pycache__
+.idea
+.gitignore
+.git
+README.md
+Dockerfile*
+docker-compose*
.gitattributes
CHANGED
@@ -1,35 +1,37 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+data/The_GALE_ENCYCLOPEDIA_of_MEDICINE_SECOND.pdf filter=lfs diff=lfs merge=lfs -text
+vectorstore/db_faiss/index.faiss filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,8 @@
+.chainlit
+.files
+langchain
+vectorstore
+*.bin
+logs.txt
+.idea
+__pycache__
Dockerfile
ADDED
@@ -0,0 +1,23 @@
+# Use the official Python 3.10 slim image
+FROM python:3.10-slim-bullseye
+
+# Set the working directory to the current folder
+WORKDIR .
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+# Copy the requirements file into the working directory
+COPY --chown=user ./requirements.txt requirements.txt
+
+# Install all packages in requirements.txt
+RUN pip install --no-cache-dir --upgrade -r ./requirements.txt
+
+WORKDIR app/
+COPY --chown=user . /app
+
+EXPOSE 7860
+
+CMD ["python", "-m", "chainlit", "run", "model.py", "-h", "--port", "7860", "--host", "0.0.0.0"]
+
README.md
CHANGED
@@ -1,12 +1,39 @@
-
-
-
-
-
-
-
-
-
-
-
-
+## Necessary resources
+
+### Model must be downloaded to the local ai_workshop folder:
+Llama 2 Model (quantized by TheBloke): https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q8_0.bin
+
+### License and other references
+The code in all scripts is subject to the license of 96harsh52/LLaMa_2_chatbot (https://github.com/96harsh52/LLaMa_2_chatbot)
+YouTube instruction (https://www.youtube.com/watch?v=kXuHxI5ZcG0&list=PLrLEqwuz-mRIdQrfeCjeCyFZ-Pl6ffPIN&index=18)
+
+Llama 2 HF Model (original one): https://huggingface.co/meta-llama
+Chainlit docs: https://github.com/Chainlit/chainlit
+
+## Create virtual environment
+
+1. Create virtual env:
+> `cd ai_workshop`
+> `python -m venv langchain`
+
+2. Activate virtual env:
+> `langchain\Scripts\activate`
+
+*NOTE: if you see a red warning in the cmd terminal saying "running scripts is disabled on this system", use PowerShell to set up the API server:*
+1. Open PowerShell
+> `Set-ExecutionPolicy Unrestricted -Scope Process`
+2. Activate the virtual env as in the previous steps
+
+3. Install requirements.txt:
+> `python -m ensurepip --upgrade`
+> `python -m pip install --upgrade setuptools`
+> `python -m pip install -r requirements.txt`
+
+
+## Create local vector storage database
+
+After activating the virtual environment, run `python .\ingest.py`
+
+## Set up the Medical chatbot server with Chainlit
+
+After the "vectorstore/db_faiss" database folder has been created, run `chainlit run .\model.py > logs.txt`
chainlit.md
ADDED
@@ -0,0 +1,14 @@
+# Welcome to Chainlit! 🚀🤖
+
+Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
+
+## Useful Links 🔗
+
+- **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
+- **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
+
+We can't wait to see what you create with Chainlit! Happy coding! 💻😊
+
+## Welcome screen
+
+To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
data/The_GALE_ENCYCLOPEDIA_of_MEDICINE_SECOND.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b2e1ac7a12e3f9a97bbd997972f27cb13786de256de83627a51e69d09208973
+size 12226938
docker-compose.yml
ADDED
@@ -0,0 +1,8 @@
+services:
+  aiworkshop:
+    image: aiworkshop:latest
+    build: .
+    ports:
+      - "7860:7860"
+#   command:
+#     - chainlit run /app/model.py --server.port 8080
ingest.py
ADDED
@@ -0,0 +1,28 @@
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
+from langchain_community.vectorstores import FAISS
+from langchain_community.llms import HuggingFaceHub
+
+DATA_PATH = 'data/'
+DB_FAISS_PATH = 'vectorstore/db_faiss'
+
+# Ingest the documents and create the vector database
+def create_vector_db():
+    loader = DirectoryLoader(DATA_PATH,
+                             glob='*.pdf',
+                             loader_cls=PyPDFLoader)
+    documents = loader.load()
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
+                                                   chunk_overlap=50)
+    texts = text_splitter.split_documents(documents)
+
+    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
+                                       model_kwargs={'device': 'cpu'})
+    db = FAISS.from_documents(texts, embeddings)
+    db.save_local(DB_FAISS_PATH)
+    return db
+
+
+if __name__ == "__main__":
+    create_vector_db()
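Once `ingest.py` has run, the saved index can be sanity-checked from a plain Python shell before the chatbot is started. A minimal sketch, assuming the vector store was built as above (the query string is only an illustrative example):

```python
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

DB_FAISS_PATH = 'vectorstore/db_faiss'

# Same embedding model that was used at ingestion time.
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                   model_kwargs={'device': 'cpu'})

# allow_dangerous_deserialization is required by recent langchain-community
# versions when loading a locally pickled index, as model.py also does.
db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)

# Retrieve the 2 chunks closest to a sample query, mirroring the k=2
# retriever setting used in model.py. The query is a hypothetical example.
for doc in db.similarity_search("What are the symptoms of anemia?", k=2):
    print(doc.metadata, doc.page_content[:120])
```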
llama-2-7b-chat.ggmlv3.q8_0.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bfdde943555c78294626a6ccd40184162d066d39774bd2c98dae24943d32cc3
+size 7160799872
model.py
ADDED
@@ -0,0 +1,110 @@
+"""
+The code in this script is subject to the license of 96harsh52/LLaMa_2_chatbot (https://github.com/96harsh52/LLaMa_2_chatbot)
+YouTube instruction (https://www.youtube.com/watch?v=kXuHxI5ZcG0&list=PLrLEqwuz-mRIdQrfeCjeCyFZ-Pl6ffPIN&index=18)
+Llama 2 Model (quantized by TheBloke): https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q8_0.bin
+Llama 2 HF Model (original one): https://huggingface.co/meta-llama
+Chainlit docs: https://github.com/Chainlit/chainlit
+"""
+
+from langchain import PromptTemplate
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain.chains import RetrievalQA
+from langchain_community.llms import CTransformers
+import chainlit as cl
+
+DB_FAISS_PATH = 'vectorstore/db_faiss'
+
+custom_prompt_template = """Use the following pieces of information to answer the user's question.
+If you don't know the answer, just say that you don't know; don't try to make up an answer.
+
+Context: {context}
+Question: {question}
+
+Only return the helpful answer below and nothing else.
+Helpful answer:
+"""
+
+
+def set_custom_prompt():
+    """
+    Prompt template for QA retrieval for each vector store
+    """
+    prompt = PromptTemplate(template=custom_prompt_template,
+                            input_variables=['context', 'question'])
+    return prompt
+
+
+def load_llm():
+    """
+    Load the language model
+    """
+    llm = CTransformers(model='llama-2-7b-chat.ggmlv3.q8_0.bin',
+                        model_type='llama',
+                        max_new_tokens=512,
+                        temperature=0.5)
+    return llm
+
+
+def retrieval_qa_chain(llm, prompt, db):
+    """
+    Create a retrieval QA chain
+    """
+    qa_chain = RetrievalQA.from_chain_type(
+        llm=llm,
+        chain_type='stuff',
+        retriever=db.as_retriever(search_kwargs={'k': 2}),
+        return_source_documents=True,
+        chain_type_kwargs={'prompt': prompt}
+    )
+    return qa_chain
+
+
+def qa_bot():
+    """
+    Create a QA bot
+    """
+    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
+                                       model_kwargs={'device': 'cpu'})
+    db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
+    llm = load_llm()
+    qa_prompt = set_custom_prompt()
+    qa = retrieval_qa_chain(llm, qa_prompt, db)
+    return qa
+
+
+def final_result(query):
+    qa_result = qa_bot()
+    response = qa_result({'query': query})
+    return response
+
+
+@cl.on_chat_start
+async def start():
+    chain = qa_bot()
+    msg = cl.Message(content="Starting the bot...")
+    await msg.send()
+    msg.content = "Hi, welcome to the Medical Chatbot. What is your query?"
+    await msg.update()
+    cl.user_session.set("chain", chain)
+
+
+@cl.on_message
+async def main(message: cl.Message):
+    chain = cl.user_session.get("chain")
+    cb = cl.AsyncLangchainCallbackHandler(
+        stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
+    )
+    cb.answer_reached = True
+    res = await chain.acall(message.content, callbacks=[cb])
+    answer = res["result"]
+    sources = res["source_documents"]
+
+    if sources:
+        answer += "\nSources: " + str(sources)
+    else:
+        answer += "\nNo sources found"
+
+    await cl.Message(content=answer).send()
+
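Because `model.py` exposes `final_result()`, the RAG chain can also be smoke-tested without starting the Chainlit server. A minimal sketch, assuming the quantized model file and the FAISS store are already in place (the question is a hypothetical example):

```python
from model import final_result

# Run one query through the full retrieval + generation pipeline.
res = final_result("What are the common causes of anemia?")

print(res["result"])            # the generated answer
print(res["source_documents"])  # the k=2 retrieved chunks backing it
```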
requirements.txt
ADDED
@@ -0,0 +1,25 @@
+matplotlib
+plotly
+scikit-learn
+azure-ai-formrecognizer
+azure-cognitiveservices-speech
+azure-ai-textanalytics
+python-dotenv
+huggingface_hub
+accelerate>=0.16.0,<1
+transformers[torch]
+safetensors
+tensorflow
+datasets
+evaluate
+langchain==0.3.3
+langchain-community==0.3.2
+streamlit
+pypdf
+tiktoken
+faiss-cpu
+google-search-results
+sentence-transformers
+chainlit
+tf-keras
+ctransformers
vectorstore/db_faiss/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dddac424aced14116b13523b7a1ba6dc67e52ebcbff17fd280457955b735356b
+size 10873389
vectorstore/db_faiss/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca969d9d3c1ded0b845754f3a22f1cf9f68174d93d8e50731f9474b4b4d386ab
+size 3984324