Renegadesoffun committed on
Commit
01e5d9a
·
1 Parent(s): 6a81d61

Updated tokenizer path for GGUF model

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -4,7 +4,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
  # Initialize the model and tokenizer
5
  model_name = "TheBloke/Mistral-7B-OpenOrca-GGUF"
6
  base_model_name = "EleutherAI/gpt-j-1.1B" # Base model for tokenizer
7
- tokenizer = AutoTokenizer.from_pretrained(base_model_name)
 
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
  chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0) # device=0 means use CPU
10
 
 
4
  # Initialize the model and tokenizer
5
  model_name = "TheBloke/Mistral-7B-OpenOrca-GGUF"
6
  base_model_name = "EleutherAI/gpt-j-1.1B" # Base model for tokenizer
7
+ tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-OpenOrca-GGUF")
8
+
9
  model = AutoModelForCausalLM.from_pretrained(model_name)
10
  chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0) # device=0 means use CPU
11