import os

import gradio as gr
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model_name = "google/gemma-2b"
adapter_model_name = "samidh/cope-gemma2b-hs-2c-skr-s1.5.9.d25"

# Load the gated Gemma base model, then apply the fine-tuned PEFT (LoRA) adapter on top of it.
model = AutoModelForCausalLM.from_pretrained(base_model_name, token=os.environ['HF_TOKEN'])
model = PeftModel.from_pretrained(model, adapter_model_name, token=os.environ['HF_TOKEN'])

# The tokenizer comes from the base model; pass the token here too since the repo is gated.
tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=os.environ['HF_TOKEN'])

# Alternative approach: load the adapter repo directly as a hosted Gradio demo.
#gr.load("models/samidh/cope-gemma2b-hs-2c-skr-s1.5.9.d25", hf_token=os.environ['HF_TOKEN']).launch()
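
# --- Hypothetical UI wiring (not in the original snippet): a minimal sketch of how the
# --- loaded model and tokenizer could be exposed through a Gradio interface. The
# --- classify() helper, its prompt handling, and the generation settings are assumptions.
def classify(text):
    inputs = tokenizer(text, return_tensors="pt")
    # Generate a short completion and return only the newly generated tokens.
    outputs = model.generate(**inputs, max_new_tokens=8)
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

demo = gr.Interface(fn=classify, inputs="text", outputs="text")
demo.launch()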