"""Load the chatdb/natural-sql-7b model and tokenizer from the Hugging Face Hub.

Running this script downloads (or reuses the cached copy of) the model weights
and instantiates them in float16, letting `device_map="auto"` place layers on
the available accelerator(s).
"""
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Single source of truth for the checkpoint id (was duplicated below).
MODEL_NAME = "chatdb/natural-sql-7b"

# Optional: pin the Hugging Face cache location by passing
# cache_dir="D:/huggingface_cache" to both from_pretrained calls.

# Print the working directory so the user can see where relative paths resolve.
print("Current working directory:", os.getcwd())

# Load the tokenizer and model.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",         # spread layers across available devices
    torch_dtype=torch.float16,  # halve memory footprint vs. float32
)