DevBM committed on
Commit
36eaeaf
·
verified ·
1 Parent(s): 1247fc7

Upload 2 files

Browse files
Files changed (2) hide show
  1. chatllama3.py +40 -0
  2. requirements.txt +13 -0
chatllama3.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Streamlit chat UI backed by a locally served Llama 3 model via Ollama.
import streamlit as st
from transformers import pipeline
# NOTE(review): this second transformers import re-imports `pipeline` and makes
# the line above redundant; neither `pipeline` nor `AutoConfig` is used in the
# visible code — confirm before removing.
from transformers import AutoConfig, pipeline
from langchain_community.llms import Ollama
import time

# Page title (markdown: asterisks render the text in italics).
st.title('*ChatBot clone*')

# Client for the local Ollama server; assumes the `llama3:latest` model has
# been pulled and the Ollama daemon is running — TODO confirm in deployment.
llm = Ollama(model='llama3:latest')
10
+
11
def response_generator(prompt, model=None):
    """Yield an LLM reply one word at a time for streaming display.

    Invokes the model once with *prompt*, then emits the reply word by
    word with a short delay so ``st.write_stream`` renders a typing
    effect. ``response.split()`` collapses all whitespace (including
    newlines) into single spaces.

    Args:
        prompt: The user's message to send to the model.
        model: Optional LLM with an ``invoke(prompt, stop=...)`` method;
            defaults to the module-level ``llm`` (kept for backward
            compatibility with existing callers).

    Yields:
        str: Each word of the response followed by a trailing space.
    """
    # Resolve the default lazily so the module-level client is only
    # required when no explicit model is supplied.
    model = llm if model is None else model
    # Stop on Llama 3's end-of-turn token so the reply terminates cleanly.
    response = model.invoke(prompt, stop=['<|eot_id|>'])
    for word in response.split():
        yield word + " "
        time.sleep(0.05)
16
+
17
+
18
# --- Chat UI ------------------------------------------------------------
# Streamlit reruns the whole script on every interaction, so conversation
# state must persist in st.session_state.

# Initialise chat history once per session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far (history is lost otherwise on rerun).
for message in st.session_state.messages:
    with st.chat_message(message['role']):
        st.markdown(message['content'])

# Accept user input.
if prompt := st.chat_input("What is up?"):
    # Record and display the user's message.
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    with st.chat_message('user'):
        st.markdown(prompt)

    # Stream the assistant's reply word by word.
    with st.chat_message('assistant'):
        # BUG FIX: the original also called `llm.invoke(prompt, ...)` here
        # and discarded the result (`ans`), so every prompt hit the model
        # twice — once wasted, once inside response_generator. A single
        # invocation now happens inside the generator.
        response = st.write_stream(response_generator(prompt))
        # write_stream returns the concatenated streamed text; persist it
        # so the reply survives the next rerun.
        st.session_state.messages.append({'role': 'assistant', 'content': response})
40
+
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain==0.0.184
2
+ PyPDF2==3.0.1
3
+ python-dotenv==1.0.0
4
+ streamlit==1.18.1
5
+ faiss-cpu==1.7.4
6
+ altair==4
7
+ tiktoken==0.4.0
8
+ # needed for huggingface llms (note: enabled below, not commented out)
9
+ huggingface-hub==0.14.1
10
+
11
+ # needed for instructor embeddings (note: enabled below, not commented out)
12
+ InstructorEmbedding==1.0.1
13
+ sentence-transformers==2.2.2