vislupus committed on
Commit
153fb4c
β€’
1 Parent(s): 8729a6b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -5,10 +5,10 @@ from huggingface_hub import hf_hub_download
5
 
6
  os.environ["LLAMA_CPP_USE_CUDA"] = "0"
7
 
8
- title = "Gemma 2 2B - Bulgarian Joke Master - GGUF"
9
  description = """
10
- πŸ”Ž [Gemma 2 2B](https://huggingface.co/unsloth/gemma-2-2b-bnb-4bit) fine-tuned for Bulgarian jokes, running on CPU in GGUF format.
11
- This model is fine-tuned for generating humorous content in Bulgarian, utilizing the [Llama.cpp library](https://github.com/ggerganov/llama.cpp).
12
  Running on CPU, it can still produce impressive results, although larger models may require more processing power.
13
  """
14
 
@@ -17,7 +17,7 @@ model_name = "unsloth.Q4_K_M.gguf"
17
  model_path = os.path.join(model_dir, model_name)
18
 
19
  hf_hub_download(
20
- repo_id="vislupus/bulgarian-joke-master-gemma-2-2b-it-bnb-4bit-gguf",
21
  filename=model_name,
22
  local_dir=model_dir
23
  )
 
5
 
6
  os.environ["LLAMA_CPP_USE_CUDA"] = "0"
7
 
8
+ title = "SmolLM 2 - Bulgarian Joke Master - GGUF"
9
  description = """
10
+ πŸ”Ž [SmolLM 2](https://huggingface.co/unsloth/SmolLM2-135M-Instruct-bnb-4bit) fine-tuned for Bulgarian jokes, running on CPU in GGUF format.\n
11
+ This model is fine-tuned for generating humorous content in Bulgarian, utilizing the [Llama.cpp library](https://github.com/ggerganov/llama.cpp).\n
12
  Running on CPU, it can still produce impressive results, although larger models may require more processing power.
13
  """
14
 
 
17
  model_path = os.path.join(model_dir, model_name)
18
 
19
  hf_hub_download(
20
+ repo_id="vislupus/bulgarian-joke-master-SmolLM2-135M-Instruct-bnb-4bit-gguf",
21
  filename=model_name,
22
  local_dir=model_dir
23
  )