Do0rMaMu committed
Commit ede4c8c (verified)
Parent(s): d667031

Update main.py

Files changed (1): main.py (+1 -1)
main.py CHANGED
@@ -6,7 +6,7 @@ from llama_cpp import Llama
 
 # Model loading with specified path and configuration
 llm = Llama(
-    model_path="Meta-Llama-3-8B-Instruct.Q4_K_M.gguf",  # Update the path as necessary
+    model_path="Llama-3.2-3B-Instruct-Q8_0.gguf",  # Update the path as necessary
     n_ctx=4096,   # Maximum number of tokens for context (input + output)
     n_threads=2,  # Number of CPU cores used
 )
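
For context, a minimal sketch of how the model configured above might be queried with llama-cpp-python's chat API. This is not part of the change itself; the prompt text and max_tokens value are illustrative assumptions:

from llama_cpp import Llama

llm = Llama(
    model_path="Llama-3.2-3B-Instruct-Q8_0.gguf",  # path as set in this commit
    n_ctx=4096,   # maximum context size (input + output tokens)
    n_threads=2,  # CPU threads used for inference
)

# create_chat_completion applies the model's chat template and returns
# an OpenAI-style response dict; max_tokens caps the generated output.
response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],  # example prompt
    max_tokens=64,  # example cap, not taken from main.py
)
print(response["choices"][0]["message"]["content"])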