NLPV committed on
Commit d1dfa52 · verified · 1 Parent(s): d0f2b21

Update app.py

Files changed (1):
  1. app.py +12 -3
app.py CHANGED
@@ -29,6 +29,9 @@ def predict(model, image_tensor):
     with torch.no_grad():
         outputs = model(image_tensor.unsqueeze(0))
         probs = torch.nn.functional.softmax(outputs[0], dim=0)
+    if torch.isnan(probs).any():
+        print("⚠️ Warning: NaN detected in prediction probabilities")
+        probs = torch.zeros_like(probs)
     pred = torch.argmax(probs).item()
     return probs, pred
 
@@ -41,9 +44,13 @@ def unlearn(model, image_tensor, label_idx, learning_rate, steps=10):
     criterion = nn.CrossEntropyLoss()
     optimizer = optim.SGD(model.parameters(), lr=learning_rate)
 
-    for _ in range(steps):
+    for i in range(steps):
         output = model(image_tensor.unsqueeze(0))
         loss = -criterion(output, torch.tensor([label_idx]))
+        if torch.isnan(loss):
+            print(f"❌ NaN detected in loss at step {i}. Stopping unlearning.")
+            break
+        print(f"🧠 Step {i+1}/{steps} - Unlearning Loss: {loss.item():.4f}")
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
@@ -77,6 +84,7 @@ def run_unlearning(index_to_unlearn, learning_rate):
     # Get sample
     image_tensor, label_idx = trainset[index_to_unlearn]
     label_name = cifar10_classes[label_idx]
+    print(f"🗂️ Actual Label Index: {label_idx} | Label Name: {label_name}")
 
     # Prediction before
     probs_before, pred_before = predict(original_model, image_tensor)
@@ -94,7 +102,8 @@ def run_unlearning(index_to_unlearn, learning_rate):
     unlearn_acc, unlearn_loss = evaluate_model(unlearned_model, testloader)
 
     result = f"""
-    📍 Index Unlearned: {index_to_unlearn} | Label: {label_name}
+    📍 Index Unlearned: {index_to_unlearn}
+    🗂️ Actual Label: {label_name} (Index: {label_idx})
 
     🔎 BEFORE Unlearning:
     - Prediction: {cifar10_classes[pred_before]}
@@ -117,7 +126,7 @@ demo = gr.Interface(
     fn=run_unlearning,
     inputs=[
         gr.Slider(0, len(trainset)-1, step=1, label="Select Index to Unlearn"),
-        gr.Slider(0.0001, 0.1, step=0.0001, value=0.01, label="Learning Rate (for Unlearning)")
+        gr.Slider(0.0001, 0.05, step=0.0001, value=0.005, label="Learning Rate (for Unlearning)")
     ],
     outputs="text",
     title="🔍 CIFAR-10 Machine Unlearning",
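The core of this update is the unlearn loop above: it performs gradient ascent on the selected sample by negating the cross-entropy loss before backpropagating, and it now logs each step and stops if the loss turns NaN. Below is a minimal, self-contained sketch of that idea outside the app; the toy model, the random input, and the name gradient_ascent_unlearn are illustrative assumptions, not code from this commit.

import torch
import torch.nn as nn
import torch.optim as optim

def gradient_ascent_unlearn(model, image_tensor, label_idx, learning_rate, steps=10):
    # Maximize the loss on one (image, label) pair by minimizing its negation.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    for i in range(steps):
        output = model(image_tensor.unsqueeze(0))              # add batch dimension
        loss = -criterion(output, torch.tensor([label_idx]))   # negated loss -> gradient ascent
        if torch.isnan(loss):
            break                                              # stop once the ascent diverges
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return model

# Illustrative usage on a toy linear classifier and a fake 32x32 RGB image.
toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
fake_image = torch.randn(3, 32, 32)
gradient_ascent_unlearn(toy_model, fake_image, label_idx=3, learning_rate=0.005)

Because the objective is the negated loss, each step pushes the model's confidence on that one sample down rather than up, and the updates can blow up quickly; this is presumably why the commit adds the NaN guards and narrows the learning-rate slider to a 0.05 maximum with a 0.005 default.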
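The results section of the diff also calls evaluate_model(unlearned_model, testloader), a helper whose definition is not part of this change. A plausible minimal sketch follows, assuming it returns test accuracy and average cross-entropy loss over the loader; the signature and body here are assumptions, not code from the repository.

import torch
import torch.nn as nn

def evaluate_model(model, dataloader):
    # Assumed helper: accuracy and mean cross-entropy loss over a dataloader.
    criterion = nn.CrossEntropyLoss()
    model.eval()
    correct, total, loss_sum = 0, 0, 0.0
    with torch.no_grad():
        for images, labels in dataloader:
            outputs = model(images)
            loss_sum += criterion(outputs, labels).item() * labels.size(0)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    return correct / total, loss_sum / total

The return order matches the unpacking in the diff (unlearn_acc, unlearn_loss = evaluate_model(...)); whatever the real implementation is, comparing these two numbers before and after unlearning is what the BEFORE/AFTER report in run_unlearning is built around.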