jiminHuang committed (verified)
Commit 4c52fb0 · 1 Parent(s): 4922032

Update app.py

Files changed (1): app.py (+4 −5)
app.py CHANGED
@@ -8,11 +8,10 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 DESCRIPTION = """\
-# Llama 3.2 3B Instruct
+# Plutus 8B instruct
 
-Llama 3.2 3B is Meta's latest iteration of open LLMs.
-This is a demo of [`meta-llama/Llama-3.2-3B-Instruct`](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct), fine-tuned for instruction following.
-For more details, please check [our post](https://huggingface.co/blog/llama32).
+Plutus 8B is The Fin AI's latest iteration of open LLMs.
+This is a demo of [`TheFinAI/plutus-8B-instruct`](https://huggingface.co/TheFinAI/plutus-8B-instruct), fine-tuned for instruction following.
 """
 
 MAX_MAX_NEW_TOKENS = 2048
@@ -21,7 +20,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "meta-llama/Llama-3.2-3B-Instruct"
+model_id = "TheFinAI/plutus-8B-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
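
The hunks above only swap the model id and the demo description. For context, here is a minimal sketch of how the updated loading code might be exercised end to end; the `torch_dtype`, the chat-template prompt, the threaded `generate` call, and the generation settings are assumptions for illustration, while only the `model_id`, the `AutoTokenizer`/`AutoModelForCausalLM` loading, the `device` selection, and the `TextIteratorStreamer` import actually appear in this diff.

```python
# Minimal sketch, assuming the new model_id from this commit and the usual
# streaming-generation pattern; everything outside the shown hunks is assumed.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model_id = "TheFinAI/plutus-8B-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype; not part of this hunk
).to(device)

# Assumed usage: build a chat prompt, then stream tokens as they are generated.
messages = [{"role": "user", "content": "What does this model specialize in?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(device)

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(input_ids=input_ids, max_new_tokens=256, streamer=streamer)

# generate() blocks, so it runs in a background thread while we read the streamer.
Thread(target=model.generate, kwargs=generation_kwargs).start()
for new_text in streamer:
    print(new_text, end="", flush=True)
```

Running generation in a background thread and iterating over the streamer mirrors the pattern such Gradio demos typically use, where the main loop yields partial text to the UI as it arrives.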