Update app.py
app.py
CHANGED
@@ -3,7 +3,7 @@ from langchain_community.vectorstores import FAISS
 from langchain_community.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.embeddings import HuggingFaceEmbeddings
-from
+from langchain.llms import HuggingFaceHub
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferMemory
 import os
@@ -67,13 +67,12 @@ def create_db(splits):
     return vectordb
 
 def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db):
-    """Initialize the LLM chain with correct
-    llm =
-
-        task="text-generation",
+    """Initialize the LLM chain with correct parameters"""
+    llm = HuggingFaceHub(
+        repo_id=llm_model,
         model_kwargs={
             "temperature": float(temperature),
-            "
+            "max_new_tokens": int(max_tokens),
             "top_k": int(top_k)
         },
         huggingfacehub_api_token=api_token