Does not run

#2 opened by rakmik

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
tokenizer = AutoTokenizer.from_pretrained("autobots/gpt-j-fourchannel-4bit")
model = AutoModelForCausalLM.from_pretrained("autobots/gpt-j-fourchannel-4bit", trust_remote_code=True, torch_dtype=torch.float16, device_map="auto")

We use the Alpaca prompt format (a sketch of the template follows the snippet below).

input_content = "hi"
input_ids = tokenizer.encode(input_content, return_tensors="pt")
output = model.generate(input_ids, max_length=11, temperature=0.7)
output_text = tokenizer.decode(output[0], skip_special_tokens=True)
print(output_text)
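
For reference, this is the Alpaca-style template the input would be wrapped in before encoding. This is only a minimal sketch: the wording is the standard Alpaca instruction template, and the build_alpaca_prompt helper is illustrative, not part of the model repo.

# Standard Alpaca instruction template (instruction-only variant).
ALPACA_TEMPLATE = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response:\n"
)

def build_alpaca_prompt(instruction):
    # Fill the template with the user's instruction text.
    return ALPACA_TEMPLATE.format(instruction=instruction)

input_ids = tokenizer.encode(build_alpaca_prompt("hi"), return_tensors="pt")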

/usr/local/lib/python3.11/dist-packages/huggingface_hub/utils/_auth.py:94: UserWarning:
The secret HF_TOKEN does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to access public models or datasets.
warnings.warn(
tokenizer_config.json: 100% 619/619 [00:00<00:00, 10.5kB/s]
vocab.json: 100% 798k/798k [00:00<00:00, 4.17MB/s]
merges.txt: 100% 456k/456k [00:00<00:00, 6.46MB/s]
tokenizer.json: 100% 1.37M/1.37M [00:00<00:00, 10.8MB/s]
added_tokens.json: 100% 4.04k/4.04k [00:00<00:00, 71.7kB/s]
special_tokens_map.json: 100% 357/357 [00:00<00:00, 11.9kB/s]
config.json: 100% 930/930 [00:00<00:00, 18.3kB/s]

OSError Traceback (most recent call last)
in <cell line: 0>()
2 import torch
3 tokenizer = AutoTokenizer.from_pretrained("autobots/gpt-j-fourchannel-4bit")
----> 4 model = AutoModelForCausalLM.from_pretrained("autobots/gpt-j-fourchannel-4bit", trust_remote_code=True, torch_dtype=torch.float16, device_map="auto")
5
6 # we use alpaca prompt

1 frames
/usr/local/lib/python3.11/dist-packages/transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, weights_only, *model_args, **kwargs)
3878 )
3879 else:
-> 3880 raise EnvironmentError(
3881 f"{pretrained_model_name_or_path} does not appear to have a file named"
3882 f" {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)},"

OSError: autobots/gpt-j-fourchannel-4bit does not appear to have a file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt or flax_model.msgpack.
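
The error means the repo contains none of the standard weight files (pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt, flax_model.msgpack), so AutoModelForCausalLM.from_pretrained has nothing to load. Assuming the "4bit" repo stores a GPTQ-style quantized checkpoint under a non-standard file name (not confirmed against this repo), a GPTQ-aware loader such as AutoGPTQ would be needed. A minimal sketch under that assumption:

# Minimal sketch, assuming GPTQ-quantized weights; requires: pip install auto-gptq
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

tokenizer = AutoTokenizer.from_pretrained("autobots/gpt-j-fourchannel-4bit")
model = AutoGPTQForCausalLM.from_quantized(
    "autobots/gpt-j-fourchannel-4bit",
    device="cuda:0",
    trust_remote_code=True,
    # If the checkpoint file has a non-default name, it may need to be passed
    # explicitly via model_basename="<checkpoint name without extension>".
)

Which loader actually matches this repo depends on how the weights were quantized, so the repo's README is the authoritative reference.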
