TBH.AI Valhala
TBH.AI Valhala is a highly optimized reasoning model built upon saishshinde15/TBH.AI_Base_Reasoning and further refined with high-quality, curated datasets to enhance reasoning and structured response generation. The model belongs to the Vortex Family, a suite of four fine-tuned models tailored for advanced knowledge synthesis and decision-making.
Rather than reinforcement learning-based enhancement, Supervised Fine-Tuning (SFT) was chosen to ensure stability, reliability, and alignment with human-preferred responses, making Valhala well suited to analytical and structured tasks.
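For a quick sanity check before the fuller Unsloth and plain-transformers examples below, the model can also be driven through the standard transformers text-generation pipeline. This is a minimal sketch rather than part of the original card: it assumes a recent transformers release (which accepts chat-style message lists directly) and enough memory to hold the 3B weights in the default dtype.

from transformers import pipeline

# Quick-start sketch (assumption: recent transformers with chat-aware text-generation pipelines).
pipe = pipeline("text-generation", model="saishshinde15/TBH.AI_Valhala")

messages = [
    {"role": "system", "content": "You are an advanced AI assistant. Provide answers in a clear manner."},
    {"role": "user", "content": "Who created you?"},
]
result = pipe(messages, max_new_tokens=256)
print(result[0]["generated_text"][-1]["content"])  # last message is the assistant's reply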
Usage with Unsloth:

from unsloth import FastLanguageModel
import torch

max_seq_length = 2048  # Choose any! RoPE scaling is supported internally.
dtype = None           # Auto-detect (float16 on T4/V100, bfloat16 on Ampere+)
load_in_4bit = True    # Use 4-bit quantization to optimize memory usage

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="saishshinde15/TBH.AI_Valhala",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's optimized inference mode

instruction = """You are an advanced AI assistant. Provide answers in a clear manner."""
messages = [
    {"role": "system", "content": instruction},
    {"role": "user", "content": "Who created you?"},
]

prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to("cuda")

outputs = model.generate(**inputs, max_new_tokens=1500)

# Decode only the newly generated tokens so the prompt is not echoed back
# (searching for the string "assistant" would also match inside the system prompt).
response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()
print(response)
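To stream tokens to the console as they are produced, which is convenient for long reasoning outputs, a TextStreamer from transformers can be handed to generate. This sketch is not part of the original card; it reuses the model, tokenizer, and inputs objects from the Unsloth example above.

from transformers import TextStreamer

# Streaming sketch: prints tokens as they are generated instead of waiting for the full output.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=1500)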
Usage with Hugging Face Transformers:

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_name = "saishshinde15/TBH.AI_Valhala"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

instruction = """You are an advanced AI assistant. Provide answers in a clear manner."""
messages = [
    {"role": "system", "content": instruction},
    {"role": "user", "content": "Who created you?"},
]

prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(device)

output_ids = model.generate(
    **inputs,
    max_new_tokens=1500,
    temperature=0.8,
    top_p=0.95,
    do_sample=True,
)

# Decode only the newly generated tokens so the prompt is not echoed back.
response = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()
print(response)
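If GPU memory is tight and Unsloth is not an option, the plain transformers path can also load the weights in 4-bit through bitsandbytes. This is a sketch rather than part of the original card; it assumes the bitsandbytes and accelerate packages are installed and a CUDA GPU is available, after which generation works exactly as in the example above.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit loading sketch (assumption: bitsandbytes installed, CUDA GPU available).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model_name = "saishshinde15/TBH.AI_Valhala"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",  # requires accelerate; places layers on the GPU automatically
)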
Base model: Qwen/Qwen2.5-3B