andromeda01111 committed on
Commit
84c111e
·
verified ·
1 Parent(s): db0a0ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -27
app.py CHANGED
@@ -64,7 +64,7 @@ def classify(model_choice, image=None, *features):
64
  """Classify using ViT (image) or NN (features)."""
65
  if model_choice == "ViT":
66
  if image is None:
67
- return "Please upload an image for ViT classification."
68
  image = image.convert("RGB")
69
  input_tensor = transform(image).unsqueeze(0).to(device)
70
 
@@ -72,52 +72,59 @@ def classify(model_choice, image=None, *features):
72
  output = vit_model(input_tensor)
73
  predicted_class = torch.argmax(output, dim=1).item()
74
 
75
- return class_names[predicted_class]
76
 
77
  elif model_choice == "Neural Network":
78
  if any(f is None for f in features):
79
- return "Please enter all 30 numerical features."
80
 
81
  input_data = np.array(features).reshape(1, -1)
82
  input_data_std = scaler.transform(input_data) if scaler else input_data
83
  prediction = nn_model.predict(input_data_std) if nn_model else [[0, 1]]
84
  predicted_class = np.argmax(prediction)
85
 
86
- return class_names[predicted_class]
87
 
88
  # Gradio UI
89
  with gr.Blocks() as demo:
90
- gr.Markdown("# Breast Cancer Classification")
91
- gr.Markdown("Choose between ViT (image-based) and Neural Network (feature-based) classification.")
92
 
93
- model_selector = gr.Radio(["ViT", "Neural Network"], label="Choose Model")
94
- image_input = gr.Image(type="pil", label="Upload Image")
 
 
95
 
96
- # Arrange feature inputs in a matrix layout (3 columns)
97
- num_columns = 3
98
- feature_inputs = []
99
 
 
100
  with gr.Row():
101
- columns = [gr.Column() for _ in range(num_columns)]
102
- for i, feature in enumerate(feature_names):
103
- with columns[i % num_columns]:
104
- feature_inputs.append(gr.Number(label=feature, scale=1))
 
105
 
 
106
  def fill_example(example):
107
  """Pre-fills example inputs."""
108
  return {feature_inputs[i]: example[i] for i in range(len(feature_inputs))}
109
 
110
- examples = [
111
- ["Neural Network", None] + benign_example,
112
- ["Neural Network", None] + malignant_example
113
- ]
114
-
115
- gr.Interface(
116
- fn=classify,
117
- inputs=[model_selector, image_input] + feature_inputs,
118
- outputs="text",
119
- examples=examples,
120
- live=True
121
- ).render()
 
 
 
 
122
 
123
  demo.launch()
 
64
  """Classify using ViT (image) or NN (features)."""
65
  if model_choice == "ViT":
66
  if image is None:
67
+ return "❌ Please upload an image for ViT classification."
68
  image = image.convert("RGB")
69
  input_tensor = transform(image).unsqueeze(0).to(device)
70
 
 
72
  output = vit_model(input_tensor)
73
  predicted_class = torch.argmax(output, dim=1).item()
74
 
75
+ return f"πŸ” **Prediction:** {class_names[predicted_class]}"
76
 
77
  elif model_choice == "Neural Network":
78
  if any(f is None for f in features):
79
+ return "❌ Please enter all 30 numerical features."
80
 
81
  input_data = np.array(features).reshape(1, -1)
82
  input_data_std = scaler.transform(input_data) if scaler else input_data
83
  prediction = nn_model.predict(input_data_std) if nn_model else [[0, 1]]
84
  predicted_class = np.argmax(prediction)
85
 
86
+ return f"πŸ” **Prediction:** {class_names[predicted_class]}"
87
 
88
  # Gradio UI
89
  with gr.Blocks() as demo:
90
+ gr.Markdown("## 🩺 Breast Cancer Classification Model")
91
+ gr.Markdown("Select a model and provide input data to classify breast cancer as **Benign** or **Malignant**.")
92
 
93
+ with gr.Row():
94
+ model_selector = gr.Radio(["ViT", "Neural Network"], label="πŸ”¬ Choose Model", value="ViT")
95
+
96
+ image_input = gr.Image(type="pil", label="πŸ“· Upload Image (for ViT)", visible=True)
97
 
98
+ feature_inputs = [gr.Number(label=feature) for feature in feature_names]
 
 
99
 
100
+ # Dynamically arrange feature inputs into rows of 3 columns
101
  with gr.Row():
102
+ for i in range(0, len(feature_inputs), 3):
103
+ with gr.Column():
104
+ for j in range(3):
105
+ if i + j < len(feature_inputs):
106
+ feature_inputs[i + j].render()
107
 
108
+ # Example buttons
109
  def fill_example(example):
110
  """Pre-fills example inputs."""
111
  return {feature_inputs[i]: example[i] for i in range(len(feature_inputs))}
112
 
113
+ with gr.Row():
114
+ example_btn_1 = gr.Button("πŸ”΅ Benign Example")
115
+ example_btn_2 = gr.Button("πŸ”΄ Malignant Example")
116
+
117
+ output_text = gr.Textbox(label="πŸ” Model Prediction", interactive=False)
118
+
119
+ # Logic to toggle inputs based on model selection
120
+ def toggle_inputs(choice):
121
+ return gr.update(visible=(choice == "ViT")), gr.update(visible=(choice == "Neural Network"))
122
+
123
+ model_selector.change(toggle_inputs, model_selector, [image_input, *feature_inputs])
124
+ example_btn_1.click(lambda: fill_example(benign_example), None, feature_inputs)
125
+ example_btn_2.click(lambda: fill_example(malignant_example), None, feature_inputs)
126
+
127
+ classify_button = gr.Button("πŸš€ Classify")
128
+ classify_button.click(classify, [model_selector, image_input] + feature_inputs, output_text)
129
 
130
  demo.launch()