gba16326553 committed on
Commit cfd8886 · verified · 1 Parent(s): f1dca2a

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -11,10 +11,12 @@ examples = [["How are you?"]]
 from transformers import AutoModel
 model = AutoModel.from_pretrained("ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF")
 #modelName = "google/gemma-2-2b-it"
-modelName = "ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF"
-tokenizer = AutoTokenizer.from_pretrained(modelName)
+#modelName = "ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF"
+modelName = "bartowski/Mistral-Nemo-Instruct-2407-GGUF"
+modelId = "Mistral-Nemo-Instruct-2407-Q4_0_8_8.gguf"
+tokenizer = AutoTokenizer.from_pretrained(modelName,gguf_file=modelId)
 
-model = AutoModel.from_pretrained(modelName)
+model = AutoModel.from_pretrained(modelName,gguf_file=modelId)
 #model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", torch_dtype=torch.float16 )
 #stvlynn/Gemma-2-2b-Chinese-it
 #tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
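For context, the change switches the app from the Gemma GGUF repo to a Mistral-Nemo GGUF repo and passes `gguf_file` to `from_pretrained`, which tells transformers which quantized file in the repo to load and dequantize into a regular model (this requires the `gguf` package, and not every GGUF quantization type is supported by the dequantizer). The sketch below shows how the new loading path could be exercised end to end; it is not part of the commit. The prompt, generation settings, and the use of `AutoModelForCausalLM` instead of `AutoModel` (so the model exposes `.generate()`) are assumptions for illustration.

```python
# Minimal sketch (not part of the commit): load the GGUF checkpoint the same
# way the updated app.py does, then run one generation as an assumed usage.
from transformers import AutoModelForCausalLM, AutoTokenizer

modelName = "bartowski/Mistral-Nemo-Instruct-2407-GGUF"  # repo id from the commit
modelId = "Mistral-Nemo-Instruct-2407-Q4_0_8_8.gguf"     # GGUF file name from the commit

# gguf_file selects the quantized file inside the repo; transformers
# dequantizes it into ordinary torch weights on load.
tokenizer = AutoTokenizer.from_pretrained(modelName, gguf_file=modelId)
model = AutoModelForCausalLM.from_pretrained(modelName, gguf_file=modelId)

# Assumed usage: one prompt taken from the app's examples list.
inputs = tokenizer("How are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```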