Text Generation
Transformers
Safetensors
imp
custom_code
MILVLG committed on
Commit
671cf99
1 Parent(s): 6224cf2

Delete test.py

Browse files
Files changed (1) hide show
  1. test.py +0 -29
test.py DELETED
@@ -1,29 +0,0 @@
1
# Smoke-test script for the imp-v1-3b vision-language checkpoint:
# load the model and tokenizer, preprocess one image, ask a question
# about it, and print the generated answer.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image

# Local checkpoint directory (was repeated twice below); adjust per environment.
MODEL_PATH = "/data/ouyangxc/labs/hg/imp-v1-3b"

# Place all newly created tensors on the GPU by default.
torch.set_default_device("cuda")

# Create model. trust_remote_code=True is required because the repo ships
# custom modeling code; fp16 + device_map="auto" for GPU inference.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)

# Set inputs. The "<image>" placeholder marks where the image features are
# spliced into the prompt by the model's custom code.
text = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat's the color of the car? ASSISTANT:"
image = Image.open("images/car.jpg")

input_ids = tokenizer(text, return_tensors='pt').input_ids
image_tensor = model.image_preprocess(image)

# Generate the answer; slice off the prompt tokens before decoding so only
# the assistant's reply is printed.
output_ids = model.generate(
    input_ids,
    max_new_tokens=150,
    images=image_tensor,
    use_cache=True)[0]
print(tokenizer.decode(output_ids[input_ids.shape[1]:], skip_special_tokens=True).strip())