thewise committed on
Commit
a4f3f65
·
verified ·
1 Parent(s): 2e68496

Update src/main.py

Browse files
Files changed (1) hide show
  1. src/main.py +11 -4
src/main.py CHANGED
@@ -8,11 +8,14 @@ from langchain.document_loaders import TextLoader
8
  from langchain.chains import RetrievalQA, ConversationalRetrievalChain
9
  from langchain.memory import ConversationBufferMemory
10
  from langchain.chat_models import ChatOllama
 
 
 
 
11
  from langchain.document_loaders import TextLoader
12
  from langchain.document_loaders import GitLoader
13
  from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
14
  from langchain.vectorstores import Chroma
15
- from langchain.embeddings import OllamaEmbeddings
16
  from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, ChatPromptTemplate
17
  import datetime
18
  import shutil
@@ -62,9 +65,13 @@ def ingest_chunks(chunks):
62
  #Retrieval function to get the data from the database and reply to the user
63
  def retreival(vector_store, k):
64
  #Creating LLM
65
- llm = ChatOllama(
66
- base_url='https://thewise-ollama-server.hf.space',
67
- model="codellama")
 
 
 
 
68
 
69
  # Define the system message template
70
  #Adding CHAT HISTORY to the System template explicitly because mainly Chat history goes to Condense the Human Question with Background (Not template), but System template goes straight to the LLM Chain
 
8
  from langchain.chains import RetrievalQA, ConversationalRetrievalChain
9
  from langchain.memory import ConversationBufferMemory
10
  from langchain.chat_models import ChatOllama
11
+ from langchain.llms import Ollama
12
+ from langchain.embeddings import OllamaEmbeddings
13
+ from langchain.callbacks.manager import CallbackManager
14
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
15
  from langchain.document_loaders import TextLoader
16
  from langchain.document_loaders import GitLoader
17
  from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
18
  from langchain.vectorstores import Chroma
 
19
  from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate, ChatPromptTemplate
20
  import datetime
21
  import shutil
 
65
  #Retrieval function to get the data from the database and reply to the user
66
  def retreival(vector_store, k):
67
  #Creating LLM
68
+ # llm = ChatOllama(
69
+ # base_url='https://thewise-ollama-server.hf.space',
70
+ # model="codellama")
71
+ llm = Ollama(
72
+ model="codellama",
73
+ base_url="https://thewise-ollama-server.hf.space",
74
+ callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
75
 
76
  # Define the system message template
77
  #Adding CHAT HISTORY to the System template explicitly because mainly Chat history goes to Condense the Human Question with Background (Not template), but System template goes straight to the LLM Chain