# Hugging Face Space metadata (page-scrape residue, kept as a comment):
# Space status: Sleeping
# File size: 565 Bytes
# Load model directly
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import streamlit as st
def count_parameters(model, name):
    """Return the number of trainable parameters in *model*.

    Args:
        model: a ``torch.nn.Module`` (anything exposing ``.parameters()``).
        name: human-readable label (e.g. the model id) used in the log line.

    Returns:
        int: sum of ``numel()`` over all parameters with ``requires_grad=True``.
    """
    # Bug fix: the original used a plain string, so the literal text
    # "{name}" was printed instead of the model id — it must be an f-string.
    print(f"Counting parameters of model:{name}")
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Streamlit UI: ask for a Hugging Face model id, download the model,
# and report its trainable-parameter count.
m_id = st.text_input("model id", placeholder="K00B404/Merged_Beowolx-CodePro_Medusa2-14X-7B-Mistral-I-v0-2")
if m_id:
    # Fix: restored the indentation lost in the scrape and removed the
    # trailing " |" gutter artifact that made this line a syntax error.
    # NOTE(review): from_pretrained downloads weights on first run — this
    # can be slow and memory-heavy for multi-billion-parameter models.
    tokenizer = AutoTokenizer.from_pretrained(m_id)
    model = AutoModelForCausalLM.from_pretrained(m_id)
    st.info(f"{count_parameters(model, m_id)} parameters")