onehowon committed
Commit ceee1cb · 1 Parent(s): 49ede08

requirement

Files changed (1)
  1. app.py +56 -34
app.py CHANGED
@@ -1,5 +1,7 @@
  import gradio as gr
  import torch
+ import torch.nn as nn
+ import torch.optim as optim
  from torchvision import transforms, models
  from art.attacks.evasion import FastGradientMethod
  from art.estimators.classification import PyTorchClassifier
@@ -8,44 +10,53 @@ import numpy as np
  import io
  import base64
  from blind_watermark import WaterMark
+ import matplotlib.pyplot as plt

- def load_model():
-     model = models.resnet50(pretrained=False)
-     num_ftrs = model.fc.in_features
-     model.fc = torch.nn.Linear(num_ftrs, 10)
-
-     model.load_state_dict(torch.load("model.pt", map_location=torch.device('cpu')))
-     model.eval()
-     return model
-
- def process_image(input_image, eps_value=0.3):
-     model = load_model()
-     device = torch.device("cpu")
-     model = model.to(device)
-
-     criterion = torch.nn.CrossEntropyLoss()
-     optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
-
-     classifier = PyTorchClassifier(
-         model=model,
-         loss=criterion,
-         optimizer=optimizer,
-         input_shape=(3, 64, 64),
-         nb_classes=10,
-     )
+ # Load a pretrained ResNet50 model (ImageNet pretraining)
+ model = models.resnet50(pretrained=True)
+
+ # Modify the final classification layer for CIFAR-10
+ num_ftrs = model.fc.in_features
+ model.fc = nn.Linear(num_ftrs, 10)
+
+ # Move the model to the GPU
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = model.to(device)

+ # Set up the loss function and optimizer
+ criterion = nn.CrossEntropyLoss()
+ optimizer = optim.Adam(model.parameters(), lr=0.001)
+
+ # Create the PyTorchClassifier
+ classifier = PyTorchClassifier(
+     model=model,
+     loss=criterion,
+     optimizer=optimizer,
+     input_shape=(3, 64, 64),
+     nb_classes=10,
+ )
+
+ # Image preprocessing function
+ def preprocess_image(image):
      transform = transforms.Compose([
          transforms.ToTensor(),
          transforms.Normalize(mean=[0.485, 0.456, 0.406],
                               std=[0.229, 0.224, 0.225])
      ])
+     return transform(image).unsqueeze(0).to(device)

-     img_tensor = transform(input_image).unsqueeze(0).to(device)
+ # Function to apply the FGSM attack and process the image
+ def generate_adversarial_image(image, eps_value):
+     img_tensor = preprocess_image(image)

+     # Set up the FGSM attack
      attack = FastGradientMethod(estimator=classifier, eps=eps_value)
+
+     # Generate the adversarial example
      adv_img_tensor = attack.generate(x=img_tensor.cpu().numpy())
      adv_img_tensor = torch.tensor(adv_img_tensor).to(device)

+     # Convert the adversarial image
      adv_img_np = adv_img_tensor.squeeze(0).cpu().numpy()
      mean = np.array([0.485, 0.456, 0.406])
      std = np.array([0.229, 0.224, 0.225])
@@ -53,28 +64,39 @@ def process_image(input_image, eps_value=0.3):
      adv_img_np = np.clip(adv_img_np, 0, 1)
      adv_img_np = adv_img_np.transpose(1, 2, 0)

+     # Convert to a PIL image
      adv_image_pil = Image.fromarray((adv_img_np * 255).astype(np.uint8))
+
+     return adv_image_pil

+ # Watermark embedding function
+ def apply_watermark(image_pil):
      wm_text = "123"
      bwm = WaterMark(password_img=123, password_wm=456)
-
+
+     # Embed the watermark into the image
      img_bytes = io.BytesIO()
-     adv_image_pil.save(img_bytes, format='PNG')
+     image_pil.save(img_bytes, format='PNG')
      bwm.read_img(img_bytes)
      bwm.read_wm(wm_text, mode='str')

+     # Embed the watermark
      bwm.embed(img_bytes)
-     result_image = base64.b64encode(img_bytes.getvalue()).decode('utf-8')
+     return img_bytes

-     return result_image
+ # Gradio function (image processing and adversarial example generation)
+ def process_image(image, eps_value):
+     # Generate the adversarial image
+     adv_image = generate_adversarial_image(image, eps_value)

- # Define the Gradio interface
- def inference(image, eps_value):
-     return process_image(image, eps_value)
+     # Apply the watermark
+     result_image = apply_watermark(adv_image)
+
+     return result_image.getvalue()

- # Use components instead of inputs in the latest Gradio
+ # Define the Gradio interface
  gr.Interface(
-     fn=inference,
+     fn=process_image,
      inputs=[gr.components.Image(type="pil"), gr.components.Slider(0.1, 1.0, step=0.1, value=0.3, label="Epsilon")],
      outputs="image"
  ).launch()
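
For reference, the watermarking step above relies on blind_watermark's WaterMark class. The library's README documents a file-path workflow that embeds a string watermark and later recovers it with extract() using the watermark's bit length; the sketch below follows that documented usage with the same passwords and text as app.py. The file names are illustrative placeholders and are not part of this commit (app.py passes an in-memory BytesIO buffer instead).

from blind_watermark import WaterMark

# Embed: read the cover image and the watermark text, then write the marked image.
bwm = WaterMark(password_img=123, password_wm=456)
bwm.read_img('adv_image.png')               # placeholder path for the adversarial image
bwm.read_wm('123', mode='str')              # same watermark text as app.py
bwm.embed('adv_image_watermarked.png')      # placeholder output path
len_wm = len(bwm.wm_bit)                    # bit length needed for extraction

# Extract: the same passwords plus the bit length recover the string.
bwm2 = WaterMark(password_img=123, password_wm=456)
wm_text = bwm2.extract('adv_image_watermarked.png', wm_shape=len_wm, mode='str')
print(wm_text)  # expected: "123"

The extraction side is not part of this commit; it is shown only to illustrate how the embedded "123" watermark would be verified.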