Use Mixtral
- requirements.txt +1 -0
- worker.py +2 -2
requirements.txt
CHANGED
```diff
@@ -2,3 +2,4 @@ langchain
 langchain-community
 langchain-huggingface
 chromadb
+InstructorEmbedding
```
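`InstructorEmbedding` is the package that backs the `HuggingFaceInstructEmbeddings` class imported in worker.py, so it has to be installed even though worker.py never imports it directly. A minimal sketch of how that dependency gets exercised; the `hkunlp/instructor-large` model name is the class default and an assumption here, since this diff does not show which model worker.py loads:

```python
# Minimal sketch, not worker.py itself: HuggingFaceInstructEmbeddings
# loads the InstructorEmbedding package (plus sentence-transformers)
# at instantiation time, which is why requirements.txt now lists it.
from langchain_community.embeddings import HuggingFaceInstructEmbeddings

# model_name is an assumption -- "hkunlp/instructor-large" is the class default
embeddings = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-large",
    model_kwargs={"device": "cpu"},
)

vector = embeddings.embed_query("What does the uploaded document say?")
print(len(vector))  # embedding dimensionality
```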
worker.py
CHANGED
```diff
@@ -1,4 +1,3 @@
-import os
 import torch
 from langchain.chains import RetrievalQA
 from langchain_community.embeddings import HuggingFaceInstructEmbeddings
@@ -38,8 +37,9 @@ def init_llm():
 
     # repo name for the model
     # model_id = "tiiuae/falcon-7b-instruct"
-    model_id = "microsoft/Phi-3.5-mini-instruct"
+    # model_id = "microsoft/Phi-3.5-mini-instruct"
     # model_id = "meta-llama/Llama-3.2-1B-Instruct"
+    model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 
     # load the model into the HuggingFaceHub
     llm_hub = HuggingFaceEndpoint(repo_id=model_id, temperature=0.1, max_new_tokens=600, model_kwargs={"max_length":600})
```
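For context, here is a hedged sketch of what the updated `init_llm` amounts to after this commit. Only the `model_id` line and the `HuggingFaceEndpoint` call are taken from the hunk above; the import path and the module-level `llm_hub` global are assumptions about the rest of the file. `HuggingFaceEndpoint` talks to Hugging Face's hosted Inference API, so a `HUGGINGFACEHUB_API_TOKEN` environment variable is expected at runtime (the dropped `import os` was simply unused in this version):

```python
# Hedged sketch of the post-commit init_llm, not the full worker.py.
# Assumption: HuggingFaceEndpoint comes from langchain-huggingface,
# which requirements.txt already lists.
from langchain_huggingface import HuggingFaceEndpoint

llm_hub = None  # assumed module-level global, as suggested by init_llm()

def init_llm():
    global llm_hub

    # repo name for the model -- this commit switches to Mixtral 8x7B Instruct
    model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"

    # load the model via the hosted Inference API; authentication is read
    # from the HUGGINGFACEHUB_API_TOKEN environment variable
    llm_hub = HuggingFaceEndpoint(
        repo_id=model_id,
        temperature=0.1,
        max_new_tokens=600,
        model_kwargs={"max_length": 600},
    )

init_llm()
print(llm_hub.invoke("Summarize retrieval-augmented generation in one sentence."))
```

Keeping the earlier model ids (`falcon-7b-instruct`, `Phi-3.5-mini-instruct`, `Llama-3.2-1B-Instruct`) as comments makes rolling back to any of them a one-line change.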