myownskyW7 committed on
Commit
eb081b5
1 Parent(s): 4d85c7e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -2
README.md CHANGED
@@ -34,7 +34,7 @@ import torch
34
  from PIL import Image
35
  from transformers import AutoTokenizer, AutoModelForCausalLM
36
  ckpt_path = "internlm/internlm-xcomposer2-7b"
37
- tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True).cuda()
38
  # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
39
  model = AutoModelForCausalLM.from_pretrained(ckpt_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
40
  model = model.eval()
@@ -68,9 +68,10 @@ In conclusion, pandas are truly remarkable animals that deserve our admiration a
68
 
69
  ```python
70
  import torch
 
71
  from transformers import AutoTokenizer, AutoModelForCausalLM
72
  ckpt_path = "internlm/internlm-xcomposer2-7b"
73
- tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True).cuda()
74
  # `torch_dtype=torch.float16` 可以令模型以 float16 精度加载,否则 transformers 会将模型加载为 float32,导致显存不足
75
  model = AutoModelForCausalLM.from_pretrained(ckpt_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
76
  model = model.eval()
 
34
  from PIL import Image
35
  from transformers import AutoTokenizer, AutoModelForCausalLM
36
  ckpt_path = "internlm/internlm-xcomposer2-7b"
37
+ tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True)
38
  # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
39
  model = AutoModelForCausalLM.from_pretrained(ckpt_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
40
  model = model.eval()
 
68
 
69
  ```python
70
  import torch
71
+ from PIL import Image
72
  from transformers import AutoTokenizer, AutoModelForCausalLM
73
  ckpt_path = "internlm/internlm-xcomposer2-7b"
74
+ tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True)
75
  # `torch_dtype=torch.float16` 可以令模型以 float16 精度加载,否则 transformers 会将模型加载为 float32,导致显存不足
76
  model = AutoModelForCausalLM.from_pretrained(ckpt_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
77
  model = model.eval()