TenzinGayche committed on
Commit d117130 (verified) · 1 parent: ba9d175

Update app.py

Files changed (1): app.py (+3, -3)
app.py CHANGED
@@ -10,14 +10,14 @@ from transformers import AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer
 DESCRIPTION = """\
 # Monlam LLM v2.0.1
 """
-
+path="TenzinGayche/tpo_v1.0.0_1010_ft"
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 # Load the model and tokenizer
-tokenizer = GemmaTokenizerFast.from_pretrained("TenzinGayche/ft_final")
-model = AutoModelForCausalLM.from_pretrained("TenzinGayche/ft_final", torch_dtype=torch.float16).to("cuda")
+tokenizer = GemmaTokenizerFast.from_pretrained(path)
+model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16).to("cuda")
 
 model.config.sliding_window = 4096
 model.eval()
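
For context, a minimal sketch (not part of this commit) of how a checkpoint loaded this way is typically paired with the imported TextIteratorStreamer for streamed generation. The prompt and generation parameters below are illustrative assumptions, not values taken from app.py.

# Sketch only: assumes the same repo path and a CUDA device, as in the diff above.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer

path = "TenzinGayche/tpo_v1.0.0_1010_ft"
tokenizer = GemmaTokenizerFast.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16).to("cuda")
model.eval()

# Hypothetical prompt; app.py builds its own inputs.
prompt = "Hello"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# The streamer yields decoded text chunks as generate() produces tokens.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=1024, do_sample=False)

# generate() blocks, so it runs in a background thread while the stream is consumed.
Thread(target=model.generate, kwargs=generate_kwargs).start()
for text_chunk in streamer:
    print(text_chunk, end="", flush=True)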