Pie31415 committed on
Commit 2a27c35 · 1 Parent(s): 82b5728

updated gradio app

Files changed (4):
  1. app.py +56 -12
  2. attack.py +59 -0
  3. model.py +78 -0
  4. models/resnet.ckpt +3 -0
app.py CHANGED
@@ -1,17 +1,61 @@
-import gradio as gr
-from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
 import torch
+import gradio as gr
+from torchvision import transforms
+from diffusers import StableDiffusionPipeline
+from model import ResNet, ResidualBlock
+from attack import Attack
 
-model_id = "stabilityai/stable-diffusion-2"
-scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
-device = troch.device("cuda" if torch.cuda.is_avaliable() else "cpu")
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
-pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, revision="fp16")
+pipe = StableDiffusionPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-2-1-base"
+)
 pipe = pipe.to(device)
 
-def txt2img(prompt):
-    image = pipe(prompt, height=768, width=768, guidance_scale = 10).images[0]
-    # image.save("sd_image.png")
-    return image
-
-gr.Interface(txt2img, gr.Text(), gr.Image(), title = 'Stable Diffusion 2.0 Colab with Gradio UI').launch(share = False, debug = True)
+CLASSES = (
+    "plane",
+    "car",
+    "bird",
+    "cat",
+    "deer",
+    "dog",
+    "frog",
+    "horse",
+    "ship",
+    "truck",
+)
+
+
+def load_classifer(model_path):
+    # Load the CIFAR-10 ResNet classifier and switch it to eval mode.
+    model = ResNet(ResidualBlock, [2, 2, 2])
+    model.load_state_dict(torch.load(model_path))
+    model.eval()
+    return model
+
+
+classifer = load_classifer("./models/resnet.ckpt")
+attack = Attack(pipe, classifer, device)
+
+
+def classifer_pred(image):
+    # Map the perturbed tensor back to PIL, re-apply the attack's
+    # preprocessing, and return the predicted CIFAR-10 class name.
+    to_pil = transforms.ToPILImage()
+    input = attack.transform(to_pil(image[0]))
+    outputs = classifer(input)
+    _, predicted = torch.max(outputs, 1)
+    return CLASSES[predicted[0]]
+
+
+def run_attack(prompt, epsilon):
+    image, perturbed_image = attack(prompt, epsilon=epsilon)
+    pred = classifer_pred(perturbed_image)
+    return image, pred
+
+
+demo = gr.Interface(
+    run_attack,
+    [gr.Text(), gr.Slider(minimum=0.0, maximum=0.3, value=0.0)],
+    [gr.Image(), gr.Text()],
+    title="Stable Diffused Adversarial Attacks",
+)
+demo.launch()
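
Reviewer note: the rewritten app wires run_attack into a single Gradio Interface; it generates an image from the prompt, perturbs it with the chosen epsilon, and reports the classifier's prediction on the perturbed copy. A minimal smoke test without the UI, as a sketch (it assumes app.py imported cleanly, i.e. the diffusion pipeline and ./models/resnet.ckpt both loaded):

    # Hypothetical direct call to the new entry point (no Gradio involved).
    image, pred = run_attack("a photo of a cat", epsilon=0.1)
    image.save("clean.png")  # the unperturbed Stable Diffusion output
    print("prediction on perturbed image:", pred)
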
attack.py ADDED
@@ -0,0 +1,59 @@
+import torch
+from torchvision import transforms
+
+
+class Attack:
+    def __init__(self, pipe, classifer, device="cuda"):
+        self.device = device
+        self.pipe = pipe
+        self.generator = torch.Generator(device=self.device).manual_seed(1024)
+        self.classifer = classifer
+
+    def __call__(
+        self, prompt, negative_prompt="", size=512, guidance_scale=8, epsilon=0
+    ):
+        pipe_output = self.pipe(
+            prompt=prompt,  # What to generate
+            negative_prompt=negative_prompt,  # What NOT to generate
+            height=size,
+            width=size,  # Specify the image size
+            guidance_scale=guidance_scale,  # How strongly to follow the prompt
+            num_inference_steps=30,  # How many steps to take
+            generator=self.generator,  # Fixed random seed
+        )
+
+        # Resulting image:
+        init_image = pipe_output.images[0]
+        image = self.transform(init_image)
+
+        image.requires_grad = True
+
+        outputs = self.classifer(image).to(self.device)
+
+        target = torch.tensor([0]).to(self.device)
+
+        return (
+            init_image,
+            self.untargeted_attack(image, outputs, target, epsilon),
+        )
+
+    def transform(self, image):
+        # Downscale to the classifier's 32x32 input size and add a batch dim.
+        img_tfms = transforms.Compose(
+            [transforms.Resize(32), transforms.ToTensor()]
+        )
+        image = img_tfms(image)
+        image = torch.unsqueeze(image, dim=0)
+        return image
+
+    def untargeted_attack(self, image, pred, target, epsilon):
+        # FGSM-style step: perturb the input along the sign of its gradient.
+        loss = torch.nn.functional.nll_loss(pred, target)
+
+        self.classifer.zero_grad()
+
+        loss.backward()
+
+        gradient_sign = image.grad.data.sign()
+        perturbed_image = image + epsilon * gradient_sign
+        perturbed_image = torch.clamp(perturbed_image, 0, 1)
+
+        return perturbed_image
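
The new untargeted_attack is a one-step FGSM-style update, x' = clamp(x + epsilon * sign(grad_x L), 0, 1); note the loss is computed against a fixed label of 0 ("plane") rather than the classifier's own prediction. A usage sketch under the same assumptions as app.py (pipe and classifer already loaded):

    # Sketch: run the attack end to end with an explicit epsilon.
    atk = Attack(pipe, classifer, device="cpu")
    clean_pil, perturbed = atk("a photo of a truck", epsilon=0.05)
    print(perturbed.shape)  # expected: torch.Size([1, 3, 32, 32])
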
model.py ADDED
@@ -0,0 +1,78 @@
+import torch.nn as nn
+
+
+def conv3x3(in_channels, out_channels, stride=1):
+    return nn.Conv2d(
+        in_channels,
+        out_channels,
+        kernel_size=3,
+        stride=stride,
+        padding=1,
+        bias=False,
+    )
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
+        super(ResidualBlock, self).__init__()
+        self.conv1 = conv3x3(in_channels, out_channels, stride)
+        self.bn1 = nn.BatchNorm2d(out_channels)
+        self.relu = nn.ReLU(inplace=True)
+        self.conv2 = conv3x3(out_channels, out_channels)
+        self.bn2 = nn.BatchNorm2d(out_channels)
+        self.downsample = downsample
+
+    def forward(self, x):
+        residual = x
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+        out = self.conv2(out)
+        out = self.bn2(out)
+        if self.downsample:
+            residual = self.downsample(x)
+        out += residual
+        out = self.relu(out)
+        return out
+
+
+class ResNet(nn.Module):
+    def __init__(self, block, layers, num_classes=10):
+        super(ResNet, self).__init__()
+        self.in_channels = 16
+        self.conv = conv3x3(3, 16)
+        self.bn = nn.BatchNorm2d(16)
+        self.relu = nn.ReLU(inplace=True)
+        self.layer1 = self.make_layer(block, 16, layers[0])
+        self.layer2 = self.make_layer(block, 32, layers[1], 2)
+        self.layer3 = self.make_layer(block, 64, layers[2], 2)
+        self.avg_pool = nn.AvgPool2d(8)
+        self.fc = nn.Linear(64, num_classes)
+
+    def make_layer(self, block, out_channels, blocks, stride=1):
+        downsample = None
+        if (stride != 1) or (self.in_channels != out_channels):
+            downsample = nn.Sequential(
+                conv3x3(self.in_channels, out_channels, stride=stride),
+                nn.BatchNorm2d(out_channels),
+            )
+        layers = []
+        layers.append(
+            block(self.in_channels, out_channels, stride, downsample)
+        )
+        self.in_channels = out_channels
+        for i in range(1, blocks):
+            layers.append(block(out_channels, out_channels))
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.bn(out)
+        out = self.relu(out)
+        out = self.layer1(out)
+        out = self.layer2(out)
+        out = self.layer3(out)
+        out = self.avg_pool(out)
+        out = out.view(out.size(0), -1)
+        out = self.fc(out)
+        return out
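
ResNet(ResidualBlock, [2, 2, 2]) builds the three-stage CIFAR-style network used by app.py: 16, 32, then 64 channels with strided downsampling between stages, an 8x8 average pool, and a 10-way linear head. A quick shape check, as a sketch:

    import torch
    from model import ResNet, ResidualBlock

    net = ResNet(ResidualBlock, [2, 2, 2])
    x = torch.randn(1, 3, 32, 32)  # CIFAR-10-sized input
    assert net(x).shape == (1, 10)  # one logit per CLASSES entry
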
models/resnet.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f103f240fcc6500cf05aebda74a2eba82112fe34f799a0e1f8c93b8704ce4de8
+size 812339