MikeCraBash committed on
Commit
6e35603
1 Parent(s): a387feb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -28,7 +28,7 @@ from transformers import AutoTokenizer
28
 
29
  # Function to calculate token length using Hugging Face tokenizer
30
  def hf_token_len(text):
31
- tokenizer = AutoTokenizer.from_pretrained("Upstage/solar-1-mini-chat")
32
  tokens = tokenizer.encode(text)
33
  return len(tokens)
34
 
@@ -61,8 +61,8 @@ qdrant_vectorstore = Qdrant.from_documents(
61
  qdrant_retriever = qdrant_vectorstore.as_retriever()
62
 
63
  # Load the Solar 10.7B model
64
- tokenizer = AutoTokenizer.from_pretrained("Upstage/solar-1-mini-chat")
65
- model = AutoModelForCausalLM.from_pretrained("Upstage/solar-1-mini-chat")
66
 
67
  from langchain.prompts import ChatPromptTemplate
68
 
@@ -128,7 +128,7 @@ retrieval_augmented_qa_chain = (
128
  @cl.on_chat_start
129
  async def start_chat():
130
  settings = {
131
- "model": "Upstage/solar-1-mini-chat",
132
  "temperature": 0,
133
  "max_tokens": 500,
134
  "top_p": 1,
 
28
 
29
  # Function to calculate token length using Hugging Face tokenizer
30
  def hf_token_len(text):
31
+ tokenizer = AutoTokenizer.from_pretrained("solar-1-mini-chat")
32
  tokens = tokenizer.encode(text)
33
  return len(tokens)
34
 
 
61
  qdrant_retriever = qdrant_vectorstore.as_retriever()
62
 
63
  # Load the Solar 10.7B model
64
+ tokenizer = AutoTokenizer.from_pretrained("solar-1-mini-chat")
65
+ model = AutoModelForCausalLM.from_pretrained("solar-1-mini-chat")
66
 
67
  from langchain.prompts import ChatPromptTemplate
68
 
 
128
  @cl.on_chat_start
129
  async def start_chat():
130
  settings = {
131
+ "model": "solar-1-mini-chat",
132
  "temperature": 0,
133
  "max_tokens": 500,
134
  "top_p": 1,