PyaeSoneK committed on
Commit
8e2430c
·
1 Parent(s): 578fb9f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -12
app.py CHANGED
@@ -80,6 +80,15 @@ def load_llm_model():
80
  # use_auth_token= st.secrets['hf_access_token'],
81
 
82
  #return llm
 
 
 
 
 
 
 
 
 
83
  pipe = pipeline("text-generation",
84
  model=model,
85
  tokenizer= tokenizer,
@@ -92,18 +101,7 @@ def load_llm_model():
92
  eos_token_id=tokenizer.eos_token_id
93
  )
94
 
95
-
96
-
97
- llm = AutoModelForCausalLM.from_pretrained("PyaeSoneK/LlamaV2LegalFineTuned",
98
- device_map='auto',
99
- torch_dtype=torch.float16,
100
- use_auth_token= st.secrets['hf_access_token'],)
101
- # load_in_4bit=True
102
-
103
- tokenizer = AutoTokenizer.from_pretrained("PyaeSoneK/LlamaV2LegalFineTuned",
104
- use_auth_token=True,)
105
-
106
- return llm
107
 
108
 
109
 
 
80
  # use_auth_token= st.secrets['hf_access_token'],
81
 
82
  #return llm
83
+ model = AutoModelForCausalLM.from_pretrained("PyaeSoneK/LlamaV2LegalFineTuned",
84
+ device_map='auto',
85
+ torch_dtype=torch.float16,
86
+ use_auth_token= st.secrets['hf_access_token'],)
87
+ # load_in_4bit=True
88
+
89
+ tokenizer = AutoTokenizer.from_pretrained("PyaeSoneK/LlamaV2LegalFineTuned",
90
+ use_auth_token=True,)
91
+
92
  pipe = pipeline("text-generation",
93
  model=model,
94
  tokenizer= tokenizer,
 
101
  eos_token_id=tokenizer.eos_token_id
102
  )
103
 
104
+ return model
 
 
 
 
 
 
 
 
 
 
 
105
 
106
 
107