update all
app.py CHANGED
@@ -2,25 +2,31 @@ import numpy as np
 import matplotlib.pyplot as plt
 from sklearn.linear_model import MultiTaskLasso, Lasso
 import gradio as gr
+import time

 rng = np.random.RandomState(42)

 # Generate some 2D coefficients with sine waves with random frequency and phase
-def make_plot(n_samples, n_features, n_tasks, n_relevant_features, alpha):
+def make_plot(n_samples, n_features, n_tasks, n_relevant_features, alpha, progress=gr.Progress()):
+
+    progress(0, desc="Starting...")
+    time.sleep(1)
+    for i in progress.tqdm(range(100)):
+        time.sleep(0.1)

     coef = np.zeros((n_tasks, n_features))
     times = np.linspace(0, 2 * np.pi, n_tasks)
     for k in range(n_relevant_features):
         coef[:, k] = np.sin((1.0 + rng.randn(1)) * times + 3 * rng.randn(1))

     X = rng.randn(n_samples, n_features)
     Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)

     coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
     coef_multi_task_lasso_ = MultiTaskLasso(alpha=alpha).fit(X, Y).coef_

     fig = plt.figure(figsize=(8, 5))

     feature_to_plot = 0
     fig = plt.figure()
     lw = 2
@@ -34,27 +40,31 @@ def make_plot(n_samples, n_features, n_tasks, n_relevant_features, alpha):
         linewidth=lw,
         label="MultiTaskLasso",
     )
-    plt.legend(loc="upper center")
+    #plt.legend(loc="upper center")
+    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
+               ncol=3, fancybox=True, shadow=True)
     plt.axis("tight")
     plt.ylim([-1.1, 1.1])
     fig.suptitle("Lasso, MultiTaskLasso and Ground truth time series")
     return fig


-model_card=f"""
+model_card = f"""
 ## Description
-
-features
-
-The multi-task lasso imposes that features that are selected at one time point are select
-for all time point. This makes feature selection by the Lasso more stable.
+Multi-task Lasso allows us to jointly fit multiple regression problems by enforcing the selected features to be the same across tasks. This example simulates sequential measurement. Each task
+is a time instant, and the relevant features, while being the same, vary in amplitude over time. Multi-task lasso imposes that features that are selected at one time point are selected
+for all time points. This makes feature selection more stable than by regular Lasso.
 ## Model
 currentmodule: sklearn.linear_model
 class:`Lasso` and class: `MultiTaskLasso` are used in this example.
 Plots represent Lasso, MultiTaskLasso and Ground truth time series
 """

-with gr.Blocks() as demo:
+with gr.Blocks(theme=gr.themes.Glass(primary_hue=gr.themes.colors.gray,
+                                     secondary_hue=gr.themes.colors.sky,
+                                     text_size=gr.themes.sizes.text_lg),
+               css=".gradio-container {background-color: zinc }") as demo:
+
     gr.Markdown('''
     <div>
     <h1 style='text-align: center'> Joint feature selection with multi-task Lasso </h1>
@@ -63,19 +73,19 @@ with gr.Blocks() as demo:
     gr.Markdown(model_card)
     gr.Markdown("Original example Author: Alexandre Gramfort <[email protected]>")
     gr.Markdown(
-        "Iterative conversion by: <a href=\"https://
+        "Iterative conversion by: <a href=\"https://www.deamarialeon.com\">Dea María Léon</a>"
     )
-
-
-
-
-
-
-
-
-
+    gr.Markdown("### Please select values and click submit:")
+
+    with gr.Row().style(equal_height=True):
+        n_samples = gr.Slider(50,500,value=100,step=50,label='Number of samples')
+        n_features = gr.Slider(5,50,value=30,step=5,label='Features')
+        n_tasks = gr.Slider(5,50,value=40,step=5,label='Tasks')
+        n_relevant_features = gr.Slider(1,10,value=5,step=1,label='Relevant features')
+        alpha = gr.Slider(0,10,value=1.0,step=0.5,label='Alpha Range')
+
     btn = gr.Button(value = 'Submit')

     btn.click(make_plot,inputs=[n_samples,n_features, n_tasks, n_relevant_features, alpha],outputs=[gr.Plot()])

-demo.launch()
+demo.queue().launch()
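For reference, the joint feature selection the model card describes can be checked outside the app. The sketch below is not part of the commit; the sizes simply mirror the slider defaults, and the two fits mirror the ones in `make_plot`. An independent `Lasso` is fit per task and one `MultiTaskLasso` is fit jointly; counting how many tasks keep each feature shows that plain Lasso picks a slightly different support per task, while MultiTaskLasso keeps a single shared support across all tasks.

```python
import numpy as np
from sklearn.linear_model import Lasso, MultiTaskLasso

rng = np.random.RandomState(42)
n_samples, n_features, n_tasks, n_relevant = 100, 30, 40, 5

# Ground-truth coefficients: only the first 5 features matter, for every task
coef = np.zeros((n_tasks, n_features))
coef[:, :n_relevant] = rng.randn(n_tasks, n_relevant)

X = rng.randn(n_samples, n_features)
Y = X @ coef.T + rng.randn(n_samples, n_tasks)

# Independent Lasso per task vs. one joint MultiTaskLasso fit
coef_lasso = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_mtl = MultiTaskLasso(alpha=1.0).fit(X, Y).coef_

# Number of tasks that select each feature: varies per feature for Lasso,
# but is all-or-nothing (0 or n_tasks) for MultiTaskLasso.
print((coef_lasso != 0).sum(axis=0))
print((coef_mtl != 0).sum(axis=0))
```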
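The other functional change is the progress bar: `make_plot` now takes a `progress=gr.Progress()` argument, and the launch call becomes `demo.queue().launch()`, since Gradio's progress tracking is delivered through the request queue. A stripped-down sketch of that pattern follows; it is illustrative only, assuming the Gradio 3.x API used in the diff, and the component names here are made up rather than taken from app.py.

```python
import time
import gradio as gr

# Illustrative sketch of the gr.Progress pattern used in app.py:
# the tracker is injected as a default argument and updated either
# directly or by wrapping an iterable with progress.tqdm().
def slow_task(steps, progress=gr.Progress()):
    progress(0, desc="Starting...")
    for _ in progress.tqdm(range(int(steps))):
        time.sleep(0.05)
    return f"Finished {int(steps)} steps"

with gr.Blocks() as demo:
    steps = gr.Slider(10, 100, value=50, step=10, label="Steps")
    out = gr.Textbox(label="Status")
    gr.Button("Run").click(slow_task, inputs=steps, outputs=out)

# Progress events are served through the queue, hence queue() before launch().
demo.queue().launch()
```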