MikeCraBash committed on
Commit
3acbbe1
·
verified ·
1 Parent(s): 50a2078

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -28,7 +28,7 @@ from transformers import AutoTokenizer
28
 
29
  # Function to calculate token length using Hugging Face tokenizer
30
  def hf_token_len(text):
31
- tokenizer = AutoTokenizer.from_pretrained("Upstage/SOLAR-10.7B-v1.0")
32
  tokens = tokenizer.encode(text)
33
  return len(tokens)
34
 
@@ -46,7 +46,7 @@ split_chunks = text_splitter.split_documents(docs)
46
  # Load the embeddings model
47
  from langchain.embeddings import HuggingFaceEmbeddings
48
 
49
- embedding_model = HuggingFaceEmbeddings(model_name="solar-10.7b")
50
 
51
  # Load the vector store and retriever from Qdrant
52
  from langchain.vectorstores import Qdrant
@@ -61,8 +61,8 @@ qdrant_vectorstore = Qdrant.from_documents(
61
  qdrant_retriever = qdrant_vectorstore.as_retriever()
62
 
63
  # Load the Solar 10.7B model
64
- tokenizer = AutoTokenizer.from_pretrained("Upstage/SOLAR-10.7B-v1.0")
65
- model = AutoModelForCausalLM.from_pretrained("Upstage/SOLAR-10.7B-v1.0")
66
 
67
  from langchain.prompts import ChatPromptTemplate
68
 
@@ -128,7 +128,7 @@ retrieval_augmented_qa_chain = (
128
  @cl.on_chat_start
129
  async def start_chat():
130
  settings = {
131
- "model": "solar-10.7b",
132
  "temperature": 0,
133
  "max_tokens": 500,
134
  "top_p": 1,
 
28
 
29
  # Function to calculate token length using Hugging Face tokenizer
30
  def hf_token_len(text):
31
+ tokenizer = AutoTokenizer.from_pretrained("Upstage/SOLAR-1-mini-chat")
32
  tokens = tokenizer.encode(text)
33
  return len(tokens)
34
 
 
46
  # Load the embeddings model
47
  from langchain.embeddings import HuggingFaceEmbeddings
48
 
49
+ embedding_model = HuggingFaceEmbeddings(model_name="solar-embedding-1-large")
50
 
51
  # Load the vector store and retriever from Qdrant
52
  from langchain.vectorstores import Qdrant
 
61
  qdrant_retriever = qdrant_vectorstore.as_retriever()
62
 
63
  # Load the Solar 1 Mini Chat model
64
+ tokenizer = AutoTokenizer.from_pretrained("Upstage/SOLAR-1-mini-chat")
65
+ model = AutoModelForCausalLM.from_pretrained("Upstage/SOLAR-1-mini-chat")
66
 
67
  from langchain.prompts import ChatPromptTemplate
68
 
 
128
  @cl.on_chat_start
129
  async def start_chat():
130
  settings = {
131
+ "model": ""Upstage/SOLAR-1-mini-chat",
132
  "temperature": 0,
133
  "max_tokens": 500,
134
  "top_p": 1,