import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# Directly assign your Hugging Face token here
hf_token = "your_hugging_face_api_token"

# Log in to Hugging Face
login(token=hf_token)
# Load the Mistral-7B-Instruct model and tokenizer (a gated repo, so authentication is required)
model_name = 'mistralai/Mistral-7B-Instruct-v0.3'
# Ensure sentencepiece is installed (required by the Mistral tokenizer)
try:
    import sentencepiece
except ImportError:
    raise ImportError(
        "The sentencepiece library is required for this tokenizer. "
        "Please install it with `pip install sentencepiece`."
    )
# `use_auth_token` is deprecated in recent transformers releases; pass `token` instead
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
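# Optional: the full-precision 7B weights need roughly 28 GB of memory. A common
# alternative (a sketch, not required for the rest of this script) is to load in
# half precision; `device_map="auto"` additionally needs the `accelerate` package,
# and a model loaded that way should not be moved with `.to(device)` afterwards:
#
#     model = AutoModelForCausalLM.from_pretrained(
#         model_name, token=hf_token, torch_dtype=torch.float16, device_map="auto"
#     )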
# Check if a GPU is available and if not, fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Example text input
text_input = "How did Tesla perform in Q1 2024?"

# Tokenize the input text
inputs = tokenizer(text_input, return_tensors="pt").to(device)
# Generate a response; `do_sample=True` is needed for temperature/top_p/top_k to
# take effect, and `max_new_tokens` bounds the reply without counting the prompt
outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True, temperature=0.7, top_p=0.9, top_k=50)
# Decode the generated tokens to a readable string
response = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Print the response
print(response)
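# Optional: Mistral's instruct checkpoints are trained on an [INST]-style chat
# format, so wrapping the prompt with the tokenizer's chat template usually
# produces better answers than feeding raw text. A minimal sketch, reusing the
# example prompt from above:
messages = [{"role": "user", "content": text_input}]
chat_inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(device)
chat_outputs = model.generate(
    chat_inputs, max_new_tokens=150, do_sample=True,
    temperature=0.7, top_p=0.9, top_k=50
)
print(tokenizer.decode(chat_outputs[0], skip_special_tokens=True))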