ryanpdwyer committed
Commit e424b8e · 1 Parent(s): 683bb94

Updated to use token

Files changed (1)
  1. app.py +6 -6
app.py CHANGED

@@ -18,8 +18,8 @@ def load_model_and_tokenizer(model_name):
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return model, tokenizer
 
-model_8b, tokenizer_8b = load_model_and_tokenizer("meta-llama/Meta-Llama-3.1-8B")
-model_8b_instruct, tokenizer_8b_instruct = load_model_and_tokenizer("meta-llama/Meta-Llama-3.1-8B-Instruct")
+model_8b, tokenizer_8b = load_model_and_tokenizer("unsloth/Meta-Llama-3.1-8B-bnb-4bit")
+model_8b_instruct, tokenizer_8b_instruct = load_model_and_tokenizer("SanctumAI/Meta-Llama-3.1-8B-Instruct-GGUF")
 
 def generate_text(model, tokenizer, prompt, max_length=100):
     inputs = tokenizer(prompt, return_tensors="pt")
@@ -36,10 +36,10 @@ if st.button("Generate"):
     if prompt:
         col1, col2 = st.columns(2)
 
-        with col1:
-            st.subheader("LLaMA-3.1-8B Output")
-            output_8b = generate_text(model_8b, tokenizer_8b, prompt, max_length)
-            st.write(output_8b)
+        # with col1:
+        #     st.subheader("LLaMA-3.1-8B Output")
+        #     output_8b = generate_text(model_8b, tokenizer_8b, prompt, max_length)
+        #     st.write(output_8b)
 
         with col2:
             st.subheader("LLaMA-3.1-8B-Instruct Output")