Ridealist JUNGU committed on
Commit
396c3e3
β€’
0 Parent(s):

Duplicate from JUNGU/talktosayno

Browse files

Co-authored-by: HAN JUNGU <[email protected]>

Files changed (5) hide show
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +110 -0
  4. docs.pdf +3 -0
  5. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ docs.pdf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Talktosayno
3
+ emoji: 📉
4
+ colorFrom: green
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 3.34.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: openrail
11
+ duplicated_from: JUNGU/talktosayno
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.chat_models import ChatOpenAI
2
+ from langchain.document_loaders import PyPDFLoader
3
+ from langchain.embeddings.openai import OpenAIEmbeddings
4
+ from langchain.embeddings.cohere import CohereEmbeddings
5
+ from langchain.text_splitter import CharacterTextSplitter
6
+ from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
7
+ from langchain.vectorstores import Chroma
8
+ from PyPDF2 import PdfWriter
9
+ import gradio as gr
10
+ import os
11
+ from dotenv import load_dotenv
12
+ import openai
13
+
14
+ load_dotenv()
15
+ #λΉ„λ°€ν‚€ κ°€μ Έμ˜€κΈ° μ‹œλ„μ€‘
16
+ # api_key = os.getenv('OPENAI_API_KEY') ## .env 파일 μ—…λ‘œλ“œν•˜λ©΄ μˆ¨κ²¨μ§€μ§€ μ•ŠμŒ μ•ˆλ¨
17
+ # api_key = os.environ['my_secret'] ## μ•ˆλΆˆλŸ¬μ™€μ§
18
+ # api_key = os.getenv('my_secret') ## 3트 .env λŒ€μ‹  secretν‚€λ₯Ό λΆˆλŸ¬μ˜€λŠ” ν˜•νƒœλ‘œ 도전
19
+ os.environ["OPENAI_API_KEY"] = os.environ['my_secret']
20
+
21
+ loader = PyPDFLoader("/home/user/app/docs.pdf")
22
+ documents = loader.load()
23
+
24
+ text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=0)
25
+ texts = text_splitter.split_documents(documents)
26
+
27
+ #vector embedding
28
+ embeddings = OpenAIEmbeddings()
29
+ vector_store = Chroma.from_documents(texts, embeddings)
30
+ retriever = vector_store.as_retriever(search_kwargs={"k": 2})
31
+
32
+ from langchain.chat_models import ChatOpenAI
33
+ from langchain.chains import RetrievalQAWithSourcesChain
34
+
35
+ llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Modify model_name if you have access to GPT-4
36
+
37
+ chain = RetrievalQAWithSourcesChain.from_chain_type(
38
+ llm=llm,
39
+ chain_type="stuff",
40
+ retriever = retriever,
41
+ return_source_documents=True)
42
+
43
+ from langchain.prompts.chat import (
44
+ ChatPromptTemplate,
45
+ SystemMessagePromptTemplate,
46
+ HumanMessagePromptTemplate,
47
+ )
48
+
49
+ system_template="""Use the following pieces of context to answer the users question shortly.
50
+ Given the following summaries of a long document and a question, create a final answer with references ("SOURCES"), use "SOURCES" in capital letters regardless of the number of sources.
51
+ If you don't know the answer, just say that "I don't know", don't try to make up an answer.
52
+ ----------------
53
+ {summaries}
54
+
55
+ You MUST answer in Korean and in Markdown format:"""
56
+
57
+ messages = [
58
+ SystemMessagePromptTemplate.from_template(system_template),
59
+ HumanMessagePromptTemplate.from_template("{question}")
60
+ ]
61
+
62
+ prompt = ChatPromptTemplate.from_messages(messages)
63
+
64
+ from langchain.chat_models import ChatOpenAI
65
+ from langchain.chains import RetrievalQAWithSourcesChain
66
+
67
+ chain_type_kwargs = {"prompt": prompt}
68
+
69
+ llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Modify model_name if you have access to GPT-4
70
+
71
+ chain = RetrievalQAWithSourcesChain.from_chain_type(
72
+ llm=llm,
73
+ chain_type="stuff",
74
+ retriever = retriever,
75
+ return_source_documents=True,
76
+ chain_type_kwargs=chain_type_kwargs
77
+ )
78
+
79
+ query = "ν–‰λ³΅ν•œ μΈμƒμ΄λž€?"
80
+ result = chain(query)
81
+
82
+
83
+ for doc in result['source_documents']:
84
+ print('λ‚΄μš© : ' + doc.page_content[0:100].replace('\n', ' '))
85
+ print('파일 : ' + doc.metadata['source'])
86
+ print('νŽ˜μ΄μ§€ : ' + str(doc.metadata['page']))
87
+
88
+
89
+ def respond(message, chat_history): # μ±„νŒ…λ΄‡μ˜ 응닡을 μ²˜λ¦¬ν•˜λŠ” ν•¨μˆ˜λ₯Ό μ •μ˜ν•©λ‹ˆλ‹€.
90
+
91
+ result = chain(message)
92
+
93
+ bot_message = result['answer']
94
+
95
+ for i, doc in enumerate(result['source_documents']):
96
+ bot_message += '[' + str(i+1) + '] ' + doc.metadata['source'] + '(' + str(doc.metadata['page']) + ') '
97
+
98
+ chat_history.append((message, bot_message)) # μ±„νŒ… 기둝에 μ‚¬μš©μžμ˜ λ©”μ‹œμ§€μ™€ λ΄‡μ˜ 응닡을 μΆ”κ°€ν•©λ‹ˆλ‹€.
99
+
100
+ return "", chat_history # μˆ˜μ •λœ μ±„νŒ… 기둝을 λ°˜ν™˜ν•©λ‹ˆλ‹€.
101
+
102
+ with gr.Blocks(theme='gstaff/sketch') as demo: # gr.Blocks()λ₯Ό μ‚¬μš©ν•˜μ—¬ μΈν„°νŽ˜μ΄μŠ€λ₯Ό μƒμ„±ν•©λ‹ˆλ‹€.
103
+ gr.Markdown("# μ•ˆλ…•ν•˜μ„Έμš”. 세이노와 λŒ€ν™”ν•΄λ³΄μ„Έμš”.")
104
+ chatbot = gr.Chatbot(label="μ±„νŒ…μ°½") # 'μ±„νŒ…μ°½'μ΄λΌλŠ” λ ˆμ΄λΈ”μ„ 가진 μ±„νŒ…λ΄‡ μ»΄ν¬λ„ŒνŠΈλ₯Ό μƒμ„±ν•©λ‹ˆλ‹€.
105
+ msg = gr.Textbox(label="μž…λ ₯") # 'μž…λ ₯'μ΄λΌλŠ” λ ˆμ΄λΈ”μ„ 가진 ν…μŠ€νŠΈλ°•μŠ€λ₯Ό μƒμ„±ν•©λ‹ˆλ‹€.
106
+ clear = gr.Button("μ΄ˆκΈ°ν™”") # 'μ΄ˆκΈ°ν™”'λΌλŠ” λ ˆμ΄λΈ”μ„ 가진 λ²„νŠΌμ„ μƒμ„±ν•©λ‹ˆλ‹€.
107
+
108
+ msg.submit(respond, [msg, chatbot], [msg, chatbot]) # ν…μŠ€νŠΈλ°•μŠ€μ— λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜κ³  μ œμΆœν•˜λ©΄ respond ν•¨μˆ˜κ°€ ν˜ΈμΆœλ˜λ„λ‘ ν•©λ‹ˆλ‹€.
109
+ clear.click(lambda: None, None, chatbot, queue=False) # 'μ΄ˆκΈ°ν™”' λ²„νŠΌμ„ ν΄λ¦­ν•˜λ©΄ μ±„νŒ… 기둝을 μ΄ˆκΈ°ν™”ν•©λ‹ˆλ‹€.
110
+ demo.launch(debug=True) # μΈν„°νŽ˜μ΄μŠ€λ₯Ό μ‹€ν–‰ν•©λ‹ˆλ‹€. μ‹€ν–‰ν•˜λ©΄ μ‚¬μš©μžλŠ” 'μž…λ ₯' ν…μŠ€νŠΈλ°•μŠ€μ— λ©”μ‹œμ§€λ₯Ό μž‘μ„±ν•˜κ³  μ œμΆœν•  수 있으며, 'μ΄ˆκΈ°ν™”' λ²„νŠΌμ„ 톡해 μ±„νŒ… 기둝을 μ΄ˆκΈ°ν™” ν•  수 μžˆμŠ΅λ‹ˆλ‹€.
docs.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dab840d01bd8582e930da5ccb74c032279e832ed02f7f938953e7f77730d1ad2
3
+ size 4232031
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ openai
2
+ langchain
3
+ pypdf
4
+ chromadb
5
+ tiktoken
6
+ PyPDF2