Update README.md
README.md CHANGED
@@ -42,7 +42,7 @@ device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
 device = 'cpu'
 
 # load model
-#
+# If you cannot connect to huggingface.co, you can download the repo and set the from_pretrained path to the local dir path, replacing "yuxindu/segvol"
 clip_tokenizer = AutoTokenizer.from_pretrained("yuxindu/segvol")
 model = AutoModel.from_pretrained("yuxindu/segvol", trust_remote_code=True, test_mode=True)
 model.model.text_encoder.tokenizer = clip_tokenizer
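The comment added at line 45 describes an offline fallback. A minimal sketch of that path, assuming the model repo has been cloned from https://huggingface.co/yuxindu/segvol into a local directory (the directory name ./segvol below is only an illustration):

```python
from transformers import AutoModel, AutoTokenizer

# Assumed location of a local clone of https://huggingface.co/yuxindu/segvol
# (for example via `git clone https://huggingface.co/yuxindu/segvol ./segvol`).
local_dir = "./segvol"

# Same calls as in the README, with the local dir path replacing "yuxindu/segvol".
clip_tokenizer = AutoTokenizer.from_pretrained(local_dir)
model = AutoModel.from_pretrained(local_dir, trust_remote_code=True, test_mode=True)
model.model.text_encoder.tokenizer = clip_tokenizer
```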
@@ -59,6 +59,7 @@ categories = ["liver", "kidney", "spleen", "pancreas"]
 
 # generate npy data format
 ct_npy, gt_npy = model.processor.preprocess_ct_gt(ct_path, gt_path, category=categories)
+# If you have downloaded our 25 processed datasets, you can skip to here with the processed ct_npy, gt_npy files
 
 # go through zoom_transform to generate zoomout & zoomin views
 data_item = model.processor.zoom_transform(ct_npy, gt_npy)
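The comment added at line 62 notes that the preprocessing step can be skipped when processed data is already on disk. A minimal sketch of that shortcut, assuming the processed ct_npy and gt_npy are stored as plain .npy files (the paths below are hypothetical; the actual layout of the released datasets may differ):

```python
import numpy as np

# Hypothetical paths to already-processed arrays; adjust to the downloaded dataset layout.
ct_npy = np.load("path/to/processed_ct.npy")
gt_npy = np.load("path/to/processed_gt.npy")

# Skip preprocess_ct_gt and continue with the zoom transform from the README.
data_item = model.processor.zoom_transform(ct_npy, gt_npy)
```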