Renegadesoffun committed
Commit 6a81d61 · 1 Parent(s): e3d0ac9

Updated app.py to use TheBloke/Mistral-7B-OpenOrca-GGUF model with base tokenizer

Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -3,7 +3,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # Initialize the model and tokenizer
 model_name = "TheBloke/Mistral-7B-OpenOrca-GGUF"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+base_model_name = "EleutherAI/gpt-j-1.1B"  # Base model for tokenizer
+tokenizer = AutoTokenizer.from_pretrained(base_model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)  # device=0 selects the first GPU; use device=-1 for CPU
 
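For context, a minimal usage sketch of the chat_pipeline this commit produces; the prompt and generation settings below are illustrative assumptions, not part of the commit:

# Hypothetical usage of the chat_pipeline defined in app.py above.
prompt = "Hello, how are you?"
outputs = chat_pipeline(prompt, max_new_tokens=50, do_sample=True)
# A "text-generation" pipeline returns a list of dicts keyed by "generated_text".
print(outputs[0]["generated_text"])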