code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def managers_one_page_two_components_two_controls(vizro_app, dash_data_table_with_id):
    """Instantiates managers with one page that contains two controls and two components."""
    # Build the components first so the page/dashboard wiring below stays readable.
    table = vm.Table(
        id="vizro_table",
        figure=dash_data_table_with_id,
        actions=[
            vm.Action(
                id="table_filter_interaction_action",
                function=filter_interaction(targets=["scatter_chart"]),
            )
        ],
    )
    graph = vm.Graph(id="scatter_chart", figure=px.scatter(px.data.gapminder(), x="lifeExp", y="gdpPercap"))
    button = vm.Button(
        id="export_data_button", actions=[vm.Action(id="export_data_action", function=export_data())]
    )
    controls = [
        vm.Filter(id="filter_continent", column="continent", selector=vm.Dropdown(id="filter_continent_selector")),
        vm.Parameter(
            id="parameter_x",
            targets=["scatter_chart.x"],
            selector=vm.Dropdown(id="parameter_x_selector", options=["lifeExp", "gdpPercap", "pop"]),
        ),
    ]
    vm.Dashboard(
        pages=[
            vm.Page(
                id="test_page",
                title="First page",
                components=[table, graph, button],
                controls=controls,
            )
        ]
    )
    Vizro._pre_build()
|
Instantiates managers with one page that contains two controls and two components.
|
managers_one_page_two_components_two_controls
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/actions/_action_loop/test_get_action_loop_components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/actions/_action_loop/test_get_action_loop_components.py
|
Apache-2.0
|
def managers_one_page_no_actions(vizro_app):
    """Instantiates managers with one "empty" page."""
    # A single page with one bare card and no actions attached anywhere.
    empty_page = vm.Page(
        id="test_page_no_actions",
        title="Second page",
        components=[vm.Card(text="")],
    )
    vm.Dashboard(pages=[empty_page])
    Vizro._pre_build()
|
Instantiates managers with one "empty" page.
|
managers_one_page_no_actions
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/actions/_action_loop/test_get_action_loop_components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/actions/_action_loop/test_get_action_loop_components.py
|
Apache-2.0
|
def test_model_type_none_root_model_none(self):
    """model_type is None | page is None -> return all elements."""
    model_ids = {model.id for model in model_manager._get_models()}
    # _get_models() also yields Layout, Navigation, Dashboard and similar framework
    # models, so assert containment rather than strict equality.
    assert model_ids >= {
        "page_1_id",
        "page_1_button_id",
        "page_1_graph_id",
        "page_2_id",
        "page_2_button_id",
        "page_2_figure_id",
    }
|
model_type is None | page is None -> return all elements.
|
test_model_type_none_root_model_none
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_root_model_none(self):
    """model_type is vm.Button | root_model is None -> return all vm.Button from the dashboard."""
    button_ids = {model.id for model in model_manager._get_models(model_type=vm.Button)}
    # Buttons from both pages are returned...
    assert button_ids >= {"page_1_button_id", "page_2_button_id"}
    # ...but no non-Button models are.
    assert button_ids.isdisjoint({"page_1_id", "page_1_graph_id", "page_2_id", "page_2_figure_id"})
|
model_type is vm.Button | root_model is None -> return all vm.Button from the dashboard.
|
test_model_type_root_model_none
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_none_root_model_not_none(self, page_1):
    """model_type is None | root_model is page_1 -> return all elements from the page_1."""
    model_ids = {model.id for model in model_manager._get_models(root_model=page_1)}
    # Everything on page_1 is present...
    assert model_ids >= {"page_1_id", "page_1_button_id", "page_1_graph_id"}
    # ...and nothing from page_2 leaks in.
    assert model_ids.isdisjoint({"page_2_id", "page_2_button_id", "page_2_figure_id"})
|
model_type is None | root_model is page_1 -> return all elements from the page_1.
|
test_model_type_none_root_model_not_none
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_not_none_page_not_none(self, page_1):
    """model_type is vm.Button | page is page_1 -> return all vm.Button from the page_1."""
    button_ids = {model.id for model in model_manager._get_models(model_type=vm.Button, root_model=page_1)}
    # Only page_1's button matches both the type filter and the root model.
    assert "page_1_button_id" in button_ids
    assert button_ids.isdisjoint(
        {"page_1_id", "page_1_graph_id", "page_2_id", "page_2_button_id", "page_2_figure_id"}
    )
|
model_type is vm.Button | page is page_1 -> return all vm.Button from the page_1.
|
test_model_type_not_none_page_not_none
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_no_match_root_model_none(self):
    """model_type matches no type | root_model is None -> return empty list."""
    # The dashboard contains no AgGrid models at all.
    matches = list(model_manager._get_models(model_type=vm.AgGrid))
    assert not matches
|
model_type matches no type | root_model is None -> return empty list.
|
test_model_type_no_match_root_model_none
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_no_match_root_model_not_none(self, page_1):
    """model_type matches no type | root_model is page_1 -> return empty list."""
    # page_1 contains no AgGrid models.
    matches = list(model_manager._get_models(model_type=vm.AgGrid, root_model=page_1))
    assert not matches
|
model_type matches no type | root_model is page_1 -> return empty list.
|
test_model_type_no_match_root_model_not_none
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_tuple_of_models(self):
    """model_type is tuple of models -> return all elements of the specified types from the dashboard."""
    matched_ids = {model.id for model in model_manager._get_models(model_type=(vm.Button, vm.Graph))}
    # All Buttons and Graphs across both pages match...
    assert matched_ids >= {"page_1_button_id", "page_1_graph_id", "page_2_button_id"}
    # ...while pages themselves and other figure types do not.
    assert matched_ids.isdisjoint({"page_1_id", "page_2_id", "page_2_figure_id"})
|
model_type is tuple of models -> return all elements of the specified types from the dashboard.
|
test_model_type_tuple_of_models
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_type_figure_models(self):
    """model_type is FIGURE_MODELS | root_model is None -> return all figure elements from the dashboard."""
    figure_ids = {model.id for model in model_manager._get_models(model_type=FIGURE_MODELS)}
    # Only the figure-like models match...
    assert figure_ids >= {"page_1_graph_id", "page_2_figure_id"}
    # ...pages and buttons are excluded.
    assert figure_ids.isdisjoint({"page_1_id", "page_1_button_id", "page_2_id", "page_2_button_id"})
|
model_type is FIGURE_MODELS | root_model is None -> return all figure elements from the dashboard.
|
test_model_type_figure_models
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_subclass_model_type(self, page_1, standard_px_chart):
    """model_type is subclass of vm.Graph -> return all elements of the specified type and its subclasses."""
    class CustomGraph(vm.Graph):
        pass
    page_1.components.append(CustomGraph(id="page_1_custom_graph_id", figure=standard_px_chart))
    # Querying for the subclass matches only the subclass instance.
    subclass_ids = {model.id for model in model_manager._get_models(model_type=CustomGraph)}
    assert "page_1_custom_graph_id" in subclass_ids
    assert "page_1_graph_id" not in subclass_ids
    # Querying for the base class matches both the base and the subclass instances.
    base_ids = {model.id for model in model_manager._get_models(model_type=vm.Graph)}
    assert {"page_1_custom_graph_id", "page_1_graph_id"} <= base_ids
|
model_type is subclass of vm.Graph -> return all elements of the specified type and its subclasses.
|
test_subclass_model_type
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_nested_models(self, page_1, make_nested_control):
    """Model is nested under another model and known property in different ways -> return the model."""
    class ControlGroup(vm.VizroBaseModel):
        controls: Any
    # The filter is wrapped by the fixture-provided nesting before being attached.
    nested_filter = make_nested_control(vm.Filter(id="page_1_control_1", column="year"))
    page_1.controls.append(ControlGroup(controls=nested_filter))
    filter_ids = {model.id for model in model_manager._get_models(model_type=vm.Filter, root_model=page_1)}
    assert "page_1_control_1" in filter_ids
|
Model is nested under another model and known property in different ways -> return the model.
|
test_nested_models
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_under_unknown_field(self, page_1):
    """Model is nested under another model but under an unknown field -> don't return the model."""
    class ControlGroup(vm.VizroBaseModel):
        unknown_field: Any
    hidden_filter = vm.Filter(id="page_1_control_1", column="year")
    page_1.controls.append(ControlGroup(unknown_field=hidden_filter))
    # The filter is unreachable through a recognized model field, so it is not found.
    filter_ids = {model.id for model in model_manager._get_models(model_type=vm.Filter, root_model=page_1)}
    assert "page_1_control_1" not in filter_ids
|
Model is nested under another model but under an unknown field -> don't return the model.
|
test_model_under_unknown_field
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_root_model_container(self, container_1):
    """model_type is None | root_model is container_1 -> return all elements from the container_1."""
    model_ids = {model.id for model in model_manager._get_models(root_model=container_1)}
    # Everything inside container_1 is returned...
    assert model_ids >= {"container_1_id", "container_1_button_id", "container_1_graph_id"}
    # ...and nothing from page_2 is.
    assert model_ids.isdisjoint({"page_2_id", "page_2_button_id", "page_2_figure_id"})
|
model_type is None | root_model is container_1 -> return all elements from the container_1.
|
test_root_model_container
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_in_page(self, page_1):
    """Model is in page -> return page."""
    first_component = page_1.components[0]
    assert model_manager._get_model_page(first_component) == page_1
|
Model is in page -> return page.
|
test_model_in_page
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_not_in_page(self, page_1):
    """Model is not in page -> return None."""
    # A model built outside any page has no owning page.
    standalone_button = vm.Button(id="standalone_button_id")
    assert model_manager._get_model_page(standalone_button) is None
|
Model is not in page -> return None.
|
test_model_not_in_page
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_model_is_page(self, page_1):
    """Model is Page -> return that page."""
    # A page is its own owning page.
    assert model_manager._get_model_page(page_1) == page_1
|
Model is Page -> return that page.
|
test_model_is_page
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/managers/test_model_manager.py
|
Apache-2.0
|
def test_add_same_model(self, Parent):
    """Test whether adding same model re-defined avoids pydantic discriminator error."""
    # Define and register a child model, then define a NEW class with the same
    # name and the same `type` discriminator value and register it again.
    # Re-registration must not raise pydantic's duplicate-discriminator error.
    class MultipleChild(vm.VizroBaseModel):
        type: Literal["derived"] = "derived"
    Parent.add_type("child", MultipleChild)
    class MultipleChild(vm.VizroBaseModel):
        type: Literal["derived"] = "derived"
    Parent.add_type("child", MultipleChild)
|
Test whether adding same model re-defined avoids pydantic discriminator error.
|
test_add_same_model
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/test_base.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/test_base.py
|
Apache-2.0
|
def test_add_duplicate_type(self, Parent):
    """Test whether adding model of same type avoids pydantic discriminator error."""
    # The subclass inherits ChildX's discriminator value, so this registers an
    # already-known `type` under the "child" field — it must not raise.
    class MultipleChild(ChildX):
        pass

    Parent.add_type("child", MultipleChild)
|
Test whether adding model of same type avoids pydantic discriminator error.
|
test_add_duplicate_type
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/test_base.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/test_base.py
|
Apache-2.0
|
def test_button_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    button = vm.Button(
        id="button", text="Click me!", extra={"color": "success", "outline": True, "href": "www.google.com"}
    )
    expected = dbc.Button(
        html.Span(["Click me!", None], className="button-text"),
        id="button",
        color="success",
        outline=True,
        href="www.google.com",
        target="_top",
    )
    assert_component_equal(button.build(), expected)
|
Test that extra arguments correctly override defaults.
|
test_button_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/test_button.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/test_button.py
|
Apache-2.0
|
def test_button_build_with_description(self):
    """Test that description argument correctly builds icon and tooltip."""
    result = vm.Button(
        id="button",
        text="Click me",
        description=vm.Tooltip(text="Test description", icon="info", id="info"),
    ).build()
    tooltip_icon = html.Span("info", id="info-icon", className="material-symbols-outlined tooltip-icon")
    tooltip = dbc.Tooltip(
        children=dcc.Markdown("Test description", id="info-text", className="card-text"),
        id="info",
        target="info-icon",
        autohide=False,
    )
    expected = dbc.Button(
        html.Span(["Click me", tooltip_icon, tooltip], className="button-text"),
        id="button",
        href="",
        target="_top",
        color="primary",
    )
    assert_component_equal(result, expected)
|
Test that description argument correctly builds icon and tooltip.
|
test_button_build_with_description
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/test_button.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/test_button.py
|
Apache-2.0
|
def test_card_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    result = vm.Card(
        id="card_id", text="Hello", extra={"class_name": "bg-primary p-1 mt-2 text-center h2"}
    ).build()
    expected_text = dcc.Markdown(
        id="card_id-text", children="Hello", dangerously_allow_html=False, className="card-text"
    )
    expected = dbc.Card(id="card_id", children=expected_text, class_name="bg-primary p-1 mt-2 text-center h2")
    assert_component_equal(result, expected)
|
Test that extra arguments correctly override defaults.
|
test_card_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/test_card.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/test_card.py
|
Apache-2.0
|
def test_container_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    container = vm.Container(
        id="container",
        title="Title",
        components=[vm.Button()],
        extra={"fluid": False, "class_name": "bg-container"},
    )
    expected = dbc.Container(id="container", fluid=False, class_name="bg-container")
    # Children are stripped: only the overridden container attributes are compared.
    assert_component_equal(container.build(), expected, keys_to_strip={"children"})
|
Test that extra arguments correctly override defaults.
|
test_container_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/test_container.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/test_container.py
|
Apache-2.0
|
def test_text_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    result = vm.Text(id="text_id", text="Test", extra={"className": "bg-primary p-1 mt-2 text-center h2"}).build()
    assert_component_equal(
        result,
        dcc.Markdown(
            id="text_id",
            children="Test",
            dangerously_allow_html=False,
            className="bg-primary p-1 mt-2 text-center h2",
        ),
    )
|
Test that extra arguments correctly override defaults.
|
test_text_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/test_text.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/test_text.py
|
Apache-2.0
|
def test_checklist_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    result = Checklist(
        id="checklist_id",
        options=["A", "B", "C"],
        value=["A"],
        title="Title",
        extra={
            "switch": True,
            "inline": True,
            "id": "overridden_id",
        },
    ).build()
    expected_legend = html.Legend([html.Span("Title", id="checklist_id_title"), None], className="form-label")
    expected_input = dbc.Checklist(
        id="overridden_id",
        options=["ALL", "A", "B", "C"],
        value=["A"],
        persistence=True,
        persistence_type="session",
        switch=True,
        inline=True,
    )
    assert_component_equal(result, html.Fieldset([expected_legend, expected_input]))
|
Test that extra arguments correctly override defaults.
|
test_checklist_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_checklist.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_checklist.py
|
Apache-2.0
|
def test_checklist_build_with_description(self):
    """Test that description arguments correctly builds icon and tooltip."""
    result = Checklist(
        options=["A", "B", "C"],
        value=["A"],
        title="Title",
        description=Tooltip(text="Test description", icon="info", id="info"),
    ).build()
    tooltip_parts = [
        html.Span("info", id="info-icon", className="material-symbols-outlined tooltip-icon"),
        dbc.Tooltip(
            children=dcc.Markdown("Test description", id="info-text", className="card-text"),
            id="info",
            target="info-icon",
            autohide=False,
        ),
    ]
    expected = html.Fieldset(
        [
            html.Legend([html.Span("Title", id="checklist_id_title"), *tooltip_parts], className="form-label"),
            dbc.Checklist(
                options=["ALL", "A", "B", "C"],
                value=["A"],
                persistence=True,
                persistence_type="session",
            ),
        ],
    )
    assert_component_equal(result, expected, keys_to_strip={"id"})
|
Test that description arguments correctly builds icon and tooltip.
|
test_checklist_build_with_description
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_checklist.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_checklist.py
|
Apache-2.0
|
def test_datepicker_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    result = vm.DatePicker(
        id="datepicker_id",
        min="2023-01-01",
        max="2023-07-01",
        value="2023-01-05",
        range=False,
        title="Title",
        extra={"clearable": True, "placeholder": "Select a date"},
    ).build()
    expected_label = dbc.Label([html.Span("Title", id="datepicker_id_title"), None], html_for="datepicker_id")
    expected_picker = dmc.DatePickerInput(
        id="datepicker_id",
        minDate="2023-01-01",
        value="2023-01-05",
        maxDate="2023-07-01",
        persistence=True,
        persistence_type="session",
        type="default",
        allowSingleDateInRange=True,
        withCellSpacing=False,
        clearable=True,
        placeholder="Select a date",
    )
    assert_component_equal(result, html.Div([expected_label, expected_picker]))
|
Test that extra arguments correctly override defaults.
|
test_datepicker_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_date_picker.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_date_picker.py
|
Apache-2.0
|
def test_datepicker_build_with_description(self):
    """Test that description argument correctly builds icon and tooltip."""
    date_picker = vm.DatePicker(
        id="datepicker_id",
        min="2023-01-01",
        max="2023-07-01",
        value="2023-01-05",
        range=False,
        title="Title",
        description=vm.Tooltip(text="Test description", icon="info", id="info"),
    ).build()
    expected_description = [
        html.Span("info", id="info-icon", className="material-symbols-outlined tooltip-icon"),
        dbc.Tooltip(
            children=dcc.Markdown("Test description", id="info-text", className="card-text"),
            id="info",
            target="info-icon",
            autohide=False,
        ),
    ]
    expected_datepicker = html.Div(
        [
            dbc.Label(
                [html.Span("Title", id="datepicker_id_title"), *expected_description],
                html_for="datepicker_id",
            ),
            dmc.DatePickerInput(
                id="datepicker_id",
                minDate="2023-01-01",
                value="2023-01-05",
                maxDate="2023-07-01",
                persistence=True,
                persistence_type="session",
                type="default",
                allowSingleDateInRange=True,
                withCellSpacing=False,
            ),
        ],
    )
    assert_component_equal(date_picker, expected_datepicker)
|
Test that extra arguments correctly override defaults.
|
test_datepicker_build_with_description
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_date_picker.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_date_picker.py
|
Apache-2.0
|
def test_dropdown_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    result = Dropdown(
        options=["A", "B", "C"],
        title="Title",
        id="dropdown_id",
        extra={
            "clearable": True,
            "optionHeight": 150,
            "id": "overridden_id",
        },
    ).build()
    # The built dropdown prepends a select-all "ALL" entry to the user options.
    expected_options = [{"label": html.Div(["ALL"]), "value": "ALL"}] + [
        {"label": letter, "value": letter} for letter in ["A", "B", "C"]
    ]
    expected = html.Div(
        [
            dbc.Label([html.Span("Title", id="dropdown_id_title"), None], html_for="dropdown_id"),
            dcc.Dropdown(
                id="overridden_id",
                options=expected_options,
                value="ALL",
                multi=True,
                persistence=True,
                persistence_type="session",
                className="dropdown",
                clearable=True,
                optionHeight=150,
            ),
        ]
    )
    assert_component_equal(result, expected)
|
Test that extra arguments correctly override defaults.
|
test_dropdown_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_dropdown.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_dropdown.py
|
Apache-2.0
|
def test_radio_items_build_with_extra(self):
    """Test that extra arguments correctly override defaults."""
    result = RadioItems(
        id="radio_items",
        options=["A", "B", "C"],
        title="Title",
        extra={
            "inline": True,
            "id": "overridden_id",
        },
    ).build()
    expected_legend = html.Legend([html.Span("Title", id="radio_items_title"), None], className="form-label")
    expected_input = dbc.RadioItems(
        id="overridden_id",
        options=["A", "B", "C"],
        value="A",
        persistence=True,
        persistence_type="session",
        inline=True,
    )
    assert_component_equal(result, html.Fieldset([expected_legend, expected_input]))
|
Test that extra arguments correctly override defaults.
|
test_radio_items_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_radio_items.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_radio_items.py
|
Apache-2.0
|
def test_range_slider_build_with_extra(self, expected_range_slider_with_extra):
    """Test that extra arguments correctly override defaults."""
    extra = {
        "tooltip": {"placement": "bottom", "always_visible": True},
        "pushable": 20,
        "id": "overridden_id",
    }
    result = vm.RangeSlider(
        id="range_slider",
        min=0.0,
        max=10.0,
        step=2,
        marks={1: "1", 5: "5", 10: "10"},
        value=[0, 10],
        title="Title",
        extra=extra,
    ).build()
    assert_component_equal(result, expected_range_slider_with_extra)
|
Test that extra arguments correctly override defaults.
|
test_range_slider_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_range_slider.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_range_slider.py
|
Apache-2.0
|
def test_range_slider_build_with_description(self, expected_range_slider_with_description):
    """Test that description arguments correctly builds icon and tooltip."""
    description = vm.Tooltip(text="Test description", icon="info", id="info")
    result = vm.RangeSlider(
        id="range_slider",
        min=0.0,
        max=10.0,
        step=2,
        marks={1: "1", 5: "5", 10: "10"},
        value=[0, 10],
        title="Title",
        description=description,
    ).build()
    assert_component_equal(result, expected_range_slider_with_description)
|
Test that description arguments correctly builds icon and tooltip.
|
test_range_slider_build_with_description
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_range_slider.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_range_slider.py
|
Apache-2.0
|
def test_slider_build_with_extra(self, expected_slider_extra):
    """Test that extra arguments correctly override defaults."""
    extra = {
        "tooltip": {"placement": "bottom", "always_visible": True},
        "id": "overridden_id",
    }
    result = vm.Slider(
        id="slider_id",
        min=0,
        max=10,
        step=1,
        value=5,
        title="Title",
        extra=extra,
    ).build()
    assert_component_equal(result, expected_slider_extra)
|
Test that extra arguments correctly override defaults.
|
test_slider_build_with_extra
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_components/form/test_slider.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_components/form/test_slider.py
|
Apache-2.0
|
def managers_one_page_two_graphs(gapminder):
    """Instantiates a simple model_manager and data_manager with a page, and two graph models and gapminder data."""
    scatter = vm.Graph(id="scatter_chart", figure=px.scatter(gapminder, x="lifeExp", y="gdpPercap"))
    bar = vm.Graph(id="bar_chart", figure=px.bar(gapminder, x="country", y="gdpPercap"))
    vm.Page(id="test_page", title="My first dashboard", components=[scatter, bar])
    Vizro._pre_build()
|
Instantiates a simple model_manager and data_manager with a page, and two graph models and gapminder data.
|
managers_one_page_two_graphs
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_controls/conftest.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_controls/conftest.py
|
Apache-2.0
|
def managers_one_page_container_controls(gapminder):
    """Instantiates managers with a page whose container carries its own filter and parameter controls."""
    vm.Page(
        id="test_container",
        title="My first dashboard",
        components=[
            vm.Container(
                title="",
                components=[
                    vm.Graph(id="scatter_chart", figure=px.scatter(gapminder, x="lifeExp", y="gdpPercap")),
                ],
                # Container-scoped controls: they apply within this container only.
                controls=[
                    vm.Filter(id="container_filter", column="continent", selector=vm.Checklist(value=["Europe"])),
                    vm.Parameter(
                        id="container_parameter",
                        targets=["scatter_chart.x"],
                        selector=vm.Checklist(options=["lifeExp", "gdpPercap", "pop"], value=["lifeExp"]),
                    ),
                ],
            ),
            vm.Graph(id="bar_chart", figure=px.bar(gapminder, x="country", y="gdpPercap")),
        ],
    )
|
Instantiates a simple model_manager and data_manager with a page, and two graph models and gapminder data.
|
managers_one_page_container_controls
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_controls/conftest.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_controls/conftest.py
|
Apache-2.0
|
def managers_one_page_container_controls_invalid(gapminder):
    """Instantiates managers with a container filter that targets a component outside its own container."""
    vm.Page(
        id="test_container",
        title="My first dashboard",
        components=[
            vm.Container(
                id="container_1",
                title="",
                components=[
                    vm.Graph(id="scatter_chart", figure=px.scatter(gapminder, x="lifeExp", y="gdpPercap")),
                ],
                controls=[
                    # Invalid: this filter lives in container_1 but targets bar_chart,
                    # which sits in the sibling container below.
                    vm.Filter(
                        id="container_filter_2", column="continent", selector=vm.Checklist(), targets=["bar_chart"]
                    ),
                ],
            ),
            vm.Container(
                title="", components=[vm.Graph(id="bar_chart", figure=px.bar(gapminder, x="country", y="gdpPercap"))]
            ),
        ],
    )
|
Instantiates a simple model_manager and data_manager with a page, and two graph models and gapminder data.
|
managers_one_page_container_controls_invalid
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_controls/conftest.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_controls/conftest.py
|
Apache-2.0
|
def managers_column_different_type():
    """Instantiates the managers with a page and three graphs sharing the same column but of different data types."""
    # One single-row frame per data type, all using the same column name.
    df_numerical = pd.DataFrame({"shared_column": [1]})
    df_temporal = pd.DataFrame({"shared_column": [datetime(2024, 1, 1)]})
    df_categorical = pd.DataFrame({"shared_column": ["a"]})
    vm.Page(
        id="test_page",
        title="Page Title",
        components=[
            vm.Graph(id="column_numerical", figure=px.scatter(df_numerical)),
            vm.Graph(id="column_temporal", figure=px.scatter(df_temporal)),
            vm.Graph(id="column_categorical", figure=px.scatter(df_categorical)),
        ],
    )
    Vizro._pre_build()
|
Instantiates the managers with a page and two graphs sharing the same column but of different data types.
|
managers_column_different_type
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_controls/test_filter.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_controls/test_filter.py
|
Apache-2.0
|
def managers_column_only_exists_in_some():
    """Dataframes with numerical, categorical and temporal columns that exist only in some graphs' data.

    The dataframes sharing a column can be of different lengths (including empty).
    """
    vm.Page(
        id="test_page",
        title="Page Title",
        components=[
            vm.Graph(id="column_numerical_exists_1", figure=px.scatter(pd.DataFrame({"column_numerical": [1]}))),
            vm.Graph(id="column_numerical_exists_2", figure=px.scatter(pd.DataFrame({"column_numerical": [1, 2]}))),
            vm.Graph(id="column_numerical_exists_empty", figure=px.scatter(pd.DataFrame({"column_numerical": []}))),
            vm.Graph(id="column_categorical_exists_1", figure=px.scatter(pd.DataFrame({"column_categorical": ["a"]}))),
            vm.Graph(
                id="column_categorical_exists_2", figure=px.scatter(pd.DataFrame({"column_categorical": ["a", "b"]}))
            ),
            vm.Graph(
                id="column_temporal_exists_1",
                figure=px.scatter(pd.DataFrame({"column_temporal": [datetime(2024, 1, 1)]})),
            ),
            vm.Graph(
                id="column_temporal_exists_2",
                figure=px.scatter(pd.DataFrame({"column_temporal": [datetime(2024, 1, 1), datetime(2024, 1, 2)]})),
            ),
        ],
    )
    Vizro._pre_build()
|
Dataframes with column_numerical and column_categorical, which can be different lengths.
|
managers_column_only_exists_in_some
|
python
|
mckinsey/vizro
|
vizro-core/tests/unit/vizro/models/_controls/test_filter.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tests/unit/vizro/models/_controls/test_filter.py
|
Apache-2.0
|
def fetch_extracted_url(source_url: str, pattern: str, headers: dict[str, str]) -> bytes:
    """Look at the file at source_url, search for pattern and then download `url_to_download`."""
    # Fetch the index page that contains the link we want to extract.
    page = requests.get(source_url, timeout=TIMEOUT, headers=headers)
    page.raise_for_status()
    if (match := re.search(pattern, page.text)) is None:
        sys.exit(f"Could not extract URL to download from {source_url}.")
    # The pattern must define a named group "url_to_download".
    url_to_download = match["url_to_download"]
    print(f"Fetching {url_to_download}...")
    download = requests.get(url_to_download, timeout=TIMEOUT)
    download.raise_for_status()
    return download.content
|
Look at the file at source_url, search for pattern and then download `url_to_download`.
|
fetch_extracted_url
|
python
|
mckinsey/vizro
|
vizro-core/tools/download_static_files.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-core/tools/download_static_files.py
|
Apache-2.0
|
def get_sample_data_info(data_name: Literal["iris", "tips", "stocks", "gapminder"]) -> DFMetaData:
    """If user provides no data, use this tool to get sample data information.
    Use the following data for the below purposes:
    - iris: mostly numerical with one categorical column, good for scatter, histogram, boxplot, etc.
    - tips: contains mix of numerical and categorical columns, good for bar, pie, etc.
    - stocks: stock prices, good for line, scatter, generally things that change over time
    - gapminder: demographic data, good for line, scatter, generally things with maps or many categories
    Args:
        data_name: Name of the dataset to get sample data for
    Returns:
        Data info object containing information about the dataset.
    """
    # Table-driven lookup; .get keeps the implicit-None behavior for names
    # outside the Literal (matching the original if/elif fall-through).
    samples = {"iris": IRIS, "tips": TIPS, "stocks": STOCKS, "gapminder": GAPMINDER}
    return samples.get(data_name)
|
If user provides no data, use this tool to get sample data information.
Use the following data for the below purposes:
- iris: mostly numerical with one categorical column, good for scatter, histogram, boxplot, etc.
- tips: contains mix of numerical and categorical columns, good for bar, pie, etc.
- stocks: stock prices, good for line, scatter, generally things that change over time
- gapminder: demographic data, good for line, scatter, generally things with maps or many categories
Args:
data_name: Name of the dataset to get sample data for
Returns:
Data info object containing information about the dataset.
|
get_sample_data_info
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def validate_model_config(
    dashboard_config: dict[str, Any],
    data_infos: list[DFMetaData],  # Should be Optional[..]=None, but Cursor complains..
    auto_open: bool = True,
) -> ValidationResults:
    """Validate Vizro model configuration. Run ALWAYS when you have a complete dashboard configuration.
    If successful, the tool will return the python code and, if it is a remote file, the py.cafe link to the chart.
    The PyCafe link will be automatically opened in your default browser if auto_open is True.
    Args:
        dashboard_config: Either a JSON string or a dictionary representing a Vizro dashboard model configuration
        data_infos: List of DFMetaData objects containing information about the data files
        auto_open: Whether to automatically open the PyCafe link in a browser
    Returns:
        ValidationResults object with status and dashboard details
    """
    # Reset Vizro's global model registry so repeated validations in one server
    # session don't collide on duplicate model ids.
    Vizro._reset()
    try:
        dashboard = vm.Dashboard.model_validate(dashboard_config)
    except ValidationError as e:
        # Return the Pydantic error verbatim so the calling LLM can self-correct.
        return ValidationResults(
            valid=False,
            message=f"Validation Error: {e!s}",
            python_code="",
            pycafe_url=None,
            browser_opened=False,
        )
    else:
        result = get_python_code_and_preview_link(dashboard, data_infos)
        # A shareable py.cafe link only works when every dataset is remote.
        pycafe_url = result.pycafe_url if all(info.file_location_type == "remote" for info in data_infos) else None
        browser_opened = False
        if pycafe_url and auto_open:
            try:
                browser_opened = webbrowser.open(pycafe_url)
            except Exception:
                # Opening a browser is best-effort and must not fail validation.
                browser_opened = False
        return ValidationResults(
            valid=True,
            message="Configuration is valid for Dashboard!",
            python_code=result.python_code,
            pycafe_url=pycafe_url,
            browser_opened=browser_opened,
        )
    finally:
        # Always leave the registry clean for the next tool call.
        Vizro._reset()
|
Validate Vizro model configuration. Run ALWAYS when you have a complete dashboard configuration.
If successful, the tool will return the python code and, if it is a remote file, the py.cafe link to the chart.
The PyCafe link will be automatically opened in your default browser if auto_open is True.
Args:
dashboard_config: Either a JSON string or a dictionary representing a Vizro dashboard model configuration
data_infos: List of DFMetaData objects containing information about the data files
auto_open: Whether to automatically open the PyCafe link in a browser
Returns:
ValidationResults object with status and dashboard details
|
validate_model_config
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def get_model_json_schema(model_name: str) -> dict[str, Any]:
    """Get the JSON schema for the specified Vizro model.
    Args:
        model_name: Name of the Vizro model to get schema for (e.g., 'Card', 'Dashboard', 'Page')
    Returns:
        JSON schema of the requested Vizro model
    """
    # Some models are served as simplified/enhanced variants to keep the schema
    # LLM-friendly; these take precedence over the plain vizro.models classes.
    overrides = {
        "Page": PageSimplified,
        "Dashboard": DashboardSimplified,
        "Graph": GraphEnhanced,
        "AgGrid": AgGridEnhanced,
        "Table": AgGridEnhanced,
        "Tabs": TabsSimplified,
        "Container": ContainerSimplified,
        "Filter": FilterSimplified,
        "Parameter": ParameterSimplified,
    }
    if model_name in overrides:
        return overrides[model_name].model_json_schema()
    if not hasattr(vm, model_name):
        # Unknown model: report the problem as data rather than raising.
        return {"error": f"Model '{model_name}' not found in vizro.models"}
    return getattr(vm, model_name).model_json_schema()
|
Get the JSON schema for the specified Vizro model.
Args:
model_name: Name of the Vizro model to get schema for (e.g., 'Card', 'Dashboard', 'Page')
Returns:
JSON schema of the requested Vizro model
|
get_model_json_schema
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def get_vizro_chart_or_dashboard_plan(user_plan: Literal["chart", "dashboard"]) -> str:
    """Get instructions for creating a Vizro chart or dashboard. Call FIRST when asked to create Vizro things."""
    # The returned text is consumed by the LLM client as step-by-step
    # instructions; wording changes directly steer its tool-calling behavior.
    if user_plan == "chart":
        return """
IMPORTANT:
- KEEP IT SIMPLE: rather than iterating yourself, ask the user for more instructions
- ALWAYS VALIDATE:if you iterate over a valid produced solution, make sure to ALWAYS call the
    validate_chart_code tool to validate the chart code, display the figure code to the user
- DO NOT modify the background (with plot_bgcolor) or color sequences unless explicitly asked for
Instructions for creating a Vizro chart:
- analyze the datasets needed for the chart using the load_and_analyze_data tool - the most important
    information here are the column names and column types
- if the user provides no data, but you need to display a chart or table, use the get_sample_data_info
    tool to get sample data information
- create a chart using plotly express and/or plotly graph objects, and call the function `custom_chart`
- call the validate_chart_code tool to validate the chart code, display the figure code to the user (as artifact)
- do NOT call any other tool after, especially do NOT create a dashboard
"""
    elif user_plan == "dashboard":
        # The dashboard plan embeds the live model overview and a minimal valid
        # config so the client has a known-good fallback starting point.
        return f"""
IMPORTANT:
- KEEP IT SIMPLE: rather than iterating yourself, ask the user for more instructions
- ALWAYS VALIDATE:if you iterate over a valid produced solution, make sure to ALWAYS call the
    validate_model_config tool again to ensure the solution is still valid
- DO NOT show any code or config to the user until you have validated the solution, do not say you are preparing
    a solution, just do it and validate it
- IF STUCK: try enquiring the schema of the component in question
Instructions for creating a Vizro dashboard:
- IF the user has no plan (ie no components or pages), use the config at the bottom of this prompt
    and validate that solution without any additions, OTHERWISE:
- analyze the datasets needed for the dashboard using the load_and_analyze_data tool - the most
    important information here are the column names and column types
- if the user provides no data, but you need to display a chart or table, use the get_sample_data_info
    tool to get sample data information
- make a plan of what components you would like to use, then request all necessary schemas
    using the get_model_json_schema tool
- assemble your components into a page, then add the page or pages to a dashboard, DO NOT show config or code
    to the user until you have validated the solution
- ALWAYS validate the dashboard configuration using the validate_model_config tool
- if you display any code artifact, you must use the above created code, do not add new config to it
Models you can use:
{get_overview_vizro_models()}
Very simple dashboard config:
{get_simple_dashboard_config()}
"""
|
Get instructions for creating a Vizro chart or dashboard. Call FIRST when asked to create Vizro things.
|
get_vizro_chart_or_dashboard_plan
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def load_and_analyze_data(path_or_url: str) -> DataAnalysisResults:
    """Load data from various file formats into a pandas DataFrame and analyze its structure.
    Supported formats:
    - CSV (.csv)
    - JSON (.json)
    - HTML (.html, .htm)
    - Excel (.xls, .xlsx)
    - OpenDocument Spreadsheet (.ods)
    - Parquet (.parquet)
    Args:
        path_or_url: Local file path or URL to a data file
    Returns:
        DataAnalysisResults object containing DataFrame information and metadata
    """
    # Handle files and URLs
    path_or_url_type = path_or_url_check(path_or_url)
    # MIME type is only a hint; load_dataframe_by_format also falls back to the extension.
    mime_type, _ = mimetypes.guess_type(str(path_or_url))
    processed_path_or_url = path_or_url
    if path_or_url_type == "remote":
        # GitHub "blob" pages are HTML; rewrite them to raw.githubusercontent.com.
        processed_path_or_url = convert_github_url_to_raw(path_or_url)
    elif path_or_url_type == "local":
        processed_path_or_url = Path(path_or_url)
    else:
        return DataAnalysisResults(valid=False, message="Invalid path or URL", df_info=None, df_metadata=None)
    try:
        df, read_fn = load_dataframe_by_format(processed_path_or_url, mime_type)
    except Exception as e:
        # Any parsing/IO failure is reported as data rather than raised to the client.
        return DataAnalysisResults(valid=False, message=f"Failed to load data: {e!s}", df_info=None, df_metadata=None)
    df_info = get_dataframe_info(df)
    # NOTE(review): local paths use the stem (extension dropped) while URLs keep
    # the full final segment as the name - confirm this asymmetry is intended.
    df_metadata = DFMetaData(
        file_name=Path(path_or_url).stem if isinstance(processed_path_or_url, Path) else Path(path_or_url).name,
        file_path_or_url=str(processed_path_or_url),
        file_location_type=path_or_url_type,
        read_function_string=read_fn,
    )
    return DataAnalysisResults(valid=True, message="Data loaded successfully", df_info=df_info, df_metadata=df_metadata)
|
Load data from various file formats into a pandas DataFrame and analyze its structure.
Supported formats:
- CSV (.csv)
- JSON (.json)
- HTML (.html, .htm)
- Excel (.xls, .xlsx)
- OpenDocument Spreadsheet (.ods)
- Parquet (.parquet)
Args:
path_or_url: Local file path or URL to a data file
Returns:
DataAnalysisResults object containing DataFrame information and metadata
|
load_and_analyze_data
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def create_starter_dashboard():
    """Prompt template for getting started with Vizro."""
    # Embeds SAMPLE_DASHBOARD_CONFIG so the client can validate a known-good
    # config without calling any discovery tools first.
    content = f"""
Create a super simple Vizro dashboard with one page and one chart and one filter:
- No need to call any tools except for validate_model_config
- Call this tool with the precise config as shown below
- The PyCafe link will be automatically opened in your default browser
- THEN show the python code after validation, but do not show the PyCafe link
- Be concise, do not explain anything else, just create the dashboard
- Finally ask the user what they would like to do next, then you can call other tools to get more information,
you should then start with the get_chart_or_dashboard_plan tool
{SAMPLE_DASHBOARD_CONFIG}
"""
    return content
|
Prompt template for getting started with Vizro.
|
create_starter_dashboard
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def create_eda_dashboard(
    file_path_or_url: str,
) -> str:
    """Prompt template for creating an EDA dashboard based on one dataset."""
    # file_path_or_url is interpolated twice on purpose: once in the task
    # description and once in the explicit tool-call instruction.
    content = f"""
Create an EDA dashboard based on the following dataset:{file_path_or_url}. Proceed as follows:
1. Analyze the data using the load_and_analyze_data tool first, passing the file path or github url {file_path_or_url}
to the tool.
2. Create a dashboard with 4 pages:
- Page 1: Summary of the dataset using the Card component and the dataset itself using the plain AgGrid component.
- Page 2: Visualizing the distribution of all numeric columns using the Graph component with a histogram.
    - use a Parameter that targets the Graph component and the x argument, and you can select the column to
    be displayed
    - IMPORTANT:remember that you target the chart like: <graph_id>.x and NOT <graph_id>.figure.x
    - do not use any color schemes etc.
    - add filters for all categorical columns
- Page 3: Visualizing the correlation between all numeric columns using the Graph component with a scatter plot.
- Page 4: Two interesting charts side by side, use the Graph component for this. Make sure they look good
    but do not try something beyond the scope of plotly express
"""
    return content
|
Prompt template for creating an EDA dashboard based on one dataset.
|
create_eda_dashboard
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def validate_chart_code(
    chart_config: ChartPlan,
    data_info: DFMetaData,
    auto_open: bool = True,
) -> ValidationResults:
    """Validate the chart code created by the user and optionally open the PyCafe link in a browser.
    Args:
        chart_config: A ChartPlan object with the chart configuration
        data_info: Metadata for the dataset to be used in the chart
        auto_open: Whether to automatically open the PyCafe link in a browser
    Returns:
        ValidationResults object with status and dashboard details
    """
    # Reset Vizro's global model registry so repeated validations don't collide.
    Vizro._reset()
    try:
        chart_plan_obj = ChartPlan.model_validate(chart_config)
    except ValidationError as e:
        # Return the Pydantic error verbatim so the calling LLM can self-correct.
        return ValidationResults(
            valid=False,
            message=f"Validation Error: {e!s}",
            python_code="",
            pycafe_url=None,
            browser_opened=False,
        )
    else:
        # Wrap the chart in a minimal one-page dashboard for previewing.
        dashboard_code = chart_plan_obj.get_dashboard_template(data_info=data_info)
        # Generate PyCafe URL if all data is remote
        pycafe_url = create_pycafe_url(dashboard_code) if data_info.file_location_type == "remote" else None
        browser_opened = False
        if auto_open and pycafe_url:
            try:
                browser_opened = webbrowser.open(pycafe_url)
            except Exception:
                # Opening a browser is best-effort and must not fail validation.
                browser_opened = False
        return ValidationResults(
            valid=True,
            message="Chart only dashboard created successfully!",
            python_code=chart_plan_obj.get_chart_code(vizro=True),
            pycafe_url=pycafe_url,
            browser_opened=browser_opened,
        )
    finally:
        # Always leave the registry clean for the next tool call.
        Vizro._reset()
|
Validate the chart code created by the user and optionally open the PyCafe link in a browser.
Args:
chart_config: A ChartPlan object with the chart configuration
data_info: Metadata for the dataset to be used in the chart
auto_open: Whether to automatically open the PyCafe link in a browser
Returns:
ValidationResults object with status and dashboard details
|
validate_chart_code
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def create_vizro_chart(
    chart_type: str,
    file_path_or_url: Optional[str] = None,
) -> str:
    """Prompt template for creating a Vizro chart."""
    # If no data file is given, the prompt directs the client to fall back to
    # the get_sample_data_info tool ("{None}" is interpolated literally then).
    content = f"""
- Create a chart using the following chart type: {chart_type}.
- You MUST name the function containing the fig `custom_chart`
- Make sure to analyze the data using the load_and_analyze_data tool first, passing the file path or github url
{file_path_or_url} OR choose the most appropriate sample data using the get_sample_data_info tool.
Then you MUST use the validate_chart_code tool to validate the chart code.
"""
    return content
|
Prompt template for creating a Vizro chart.
|
create_vizro_chart
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/server.py
|
Apache-2.0
|
def main():
    """Run the Vizro MCP server - makes charts and dashboards available to AI assistants."""
    # Configure logging to show warnings by default; stderr keeps diagnostics
    # out of the MCP stdio protocol stream on stdout.
    logging.basicConfig(level=logging.WARNING, stream=sys.stderr)
    # Run the MCP server (blocks until the client disconnects).
    mcp.run()
|
Run the Vizro MCP server - makes charts and dashboards available to AI assistants.
|
main
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/__init__.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/__init__.py
|
Apache-2.0
|
def _strip_markdown(code_string: str) -> str:
"""Remove any code block wrappers (markdown or triple quotes)."""
wrappers = [("```python\n", "```"), ("```py\n", "```"), ("```\n", "```"), ('"""', '"""'), ("'''", "'''")]
for start, end in wrappers:
if code_string.startswith(start) and code_string.endswith(end):
code_string = code_string[len(start) : -len(end)]
break
return code_string.strip()
|
Remove any code block wrappers (markdown or triple quotes).
|
_strip_markdown
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_schemas/schemas.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_schemas/schemas.py
|
Apache-2.0
|
def get_dashboard_template(self, data_info: DFMetaData) -> str:
    """Create a simple dashboard template for displaying the chart.
    Args:
        data_info: The metadata of the dataset to use.
    Returns:
        Complete Python code for a Vizro dashboard displaying the chart.
    """
    # Render the chart function and its imports in Vizro mode (vizro=True).
    chart_code = self.get_chart_code(vizro=True)
    imports = self.get_imports(vizro=True)
    # Add the Vizro-specific imports if not present
    additional_imports = [
        "import vizro.models as vm",
        "from vizro import Vizro",
        "from vizro.managers import data_manager",
    ]
    # Combine imports without duplicates
    # dict.fromkeys de-duplicates while preserving insertion order.
    all_imports = list(dict.fromkeys(additional_imports + imports.split("\n")))
    # chr(10) is "\n": f-string expressions may not contain backslashes (pre-3.12).
    dashboard_template = f"""
{chr(10).join(imp for imp in all_imports if imp)}
# Load the data
data_manager["{data_info.file_name}"] = {data_info.read_function_string}("{data_info.file_path_or_url}")
# Custom chart code
{chart_code}
# Create a dashboard to display the chart
dashboard = vm.Dashboard(
    pages=[
        vm.Page(
            title="{self.chart_type.capitalize()} Chart",
            components=[
                vm.Graph(
                    id="{self.chart_type}_graph",
                    figure={CUSTOM_CHART_NAME}("{data_info.file_name}"),
                )
            ],
        )
    ],
    title="{self.chart_type.capitalize()} Dashboard",
)
# Run the dashboard
Vizro().build(dashboard).run()
"""
    return dashboard_template
|
Create a simple dashboard template for displaying the chart.
Args:
data_info: The metadata of the dataset to use.
Returns:
Complete Python code for a Vizro dashboard displaying the chart.
|
get_dashboard_template
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_schemas/schemas.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_schemas/schemas.py
|
Apache-2.0
|
def get_overview_vizro_models() -> dict[str, list[dict[str, str]]]:
    """Get all available models in the vizro.models namespace.
    Returns:
        Dictionary with categories of models and their descriptions
    """

    def summarize(model_class) -> dict[str, str]:
        # Only the first docstring line is used as the description.
        doc = model_class.__doc__ or "No description available"
        return {"name": model_class.__name__, "description": doc.split("\n")[0]}

    return {category: [summarize(model) for model in models] for category, models in MODEL_GROUPS.items()}
|
Get all available models in the vizro.models namespace.
Returns:
Dictionary with categories of models and their descriptions
|
get_overview_vizro_models
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_schemas/schemas.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_schemas/schemas.py
|
Apache-2.0
|
def convert_github_url_to_raw(path_or_url: str) -> str:
    """Convert a GitHub URL to a raw URL if it's a GitHub URL, otherwise return the original path or URL."""
    # Matches both /blob/ (HTML page) and /raw/ style GitHub file URLs.
    match = re.match(
        r"https?://(?:www\.)?github\.com/([^/]+)/([^/]+)/(?:blob|raw)/([^/]+)/(.+)",
        path_or_url,
    )
    if not match:
        # Not a GitHub file URL: pass through untouched.
        return path_or_url
    user, repo, branch, file_path = match.groups()
    return f"https://raw.githubusercontent.com/{user}/{repo}/{branch}/{file_path}"
|
Convert a GitHub URL to a raw URL if it's a GitHub URL, otherwise return the original path or URL.
|
convert_github_url_to_raw
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_utils/utils.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_utils/utils.py
|
Apache-2.0
|
def load_dataframe_by_format(
    path_or_url: Union[str, Path], mime_type: Optional[str] = None
) -> tuple[pd.DataFrame, Literal["pd.read_csv", "pd.read_json", "pd.read_html", "pd.read_excel", "pd.read_parquet"]]:
    """Load a dataframe based on file format determined by MIME type or file extension."""
    lowered = str(path_or_url).lower()
    spreadsheet_mimes = (
        "application/vnd.ms-excel",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "application/vnd.oasis.opendocument.spreadsheet",
    )
    # Dispatch on MIME type first, extension second.
    if mime_type == "text/csv" or lowered.endswith(".csv"):
        read_fn = "pd.read_csv"
        frame = pd.read_csv(path_or_url, on_bad_lines="warn", low_memory=False)
    elif mime_type == "application/json" or lowered.endswith(".json"):
        read_fn = "pd.read_json"
        frame = pd.read_json(path_or_url)
    elif mime_type == "text/html" or lowered.endswith((".html", ".htm")):
        read_fn = "pd.read_html"
        tables = pd.read_html(path_or_url)
        if not tables:
            raise ValueError("No HTML tables found in the provided file or URL")
        # Use the first table on the page by default.
        frame = tables[0]
    elif mime_type in spreadsheet_mimes or lowered.endswith((".xls", ".xlsx", ".ods")):
        read_fn = "pd.read_excel"
        frame = pd.read_excel(path_or_url)  # opens only sheet 0
    elif mime_type == "application/vnd.apache.parquet" or lowered.endswith(".parquet"):
        # The parquet MIME type exists but is rarely reported in practice.
        read_fn = "pd.read_parquet"
        frame = pd.read_parquet(path_or_url)
    else:
        raise ValueError("Could not determine file format")
    # A Series result (e.g. single-column JSON) is promoted to a DataFrame.
    if isinstance(frame, pd.Series):
        frame = frame.to_frame()
    return frame, read_fn
|
Load a dataframe based on file format determined by MIME type or file extension.
|
load_dataframe_by_format
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_utils/utils.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_utils/utils.py
|
Apache-2.0
|
def path_or_url_check(string: str) -> str:
    """Check if a string is a link or a file path."""
    # Anything that looks like a URL is treated as remote without touching disk.
    if string.startswith(("http://", "https://", "www.")):
        return "remote"
    return "local" if Path(string).is_file() else "invalid"
|
Check if a string is a link or a file path.
|
path_or_url_check
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_utils/utils.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_utils/utils.py
|
Apache-2.0
|
def create_pycafe_url(python_code: str) -> str:
    """Create a PyCafe URL for a given Python code.

    Args:
        python_code: Complete dashboard/chart code to embed in the snippet.

    Returns:
        A py.cafe snippet URL whose query string carries the project as
        gzip-compressed, base64-encoded JSON.
    """
    # Create JSON object for py.cafe
    json_object = {
        "code": python_code,
        "requirements": "vizro==0.1.38",
        "files": [],
    }
    # Convert to compressed base64 URL
    json_text = json.dumps(json_object)
    # mtime=0 keeps the gzip header constant: gzip.compress embeds the current
    # timestamp by default, which made identical code produce a different URL
    # on every call. Pinning it makes URLs deterministic and cache-friendly.
    compressed_json_text = gzip.compress(json_text.encode("utf8"), mtime=0)
    base64_text = base64.b64encode(compressed_json_text).decode("utf8")
    query = urlencode({"c": base64_text}, quote_via=quote)
    return f"{PYCAFE_URL}/snippet/vizro/v1?{query}"
|
Create a PyCafe URL for a given Python code.
|
create_pycafe_url
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_utils/utils.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_utils/utils.py
|
Apache-2.0
|
def get_python_code_and_preview_link(
    model_object: vm.VizroBaseModel, data_infos: list[DFMetaData]
) -> VizroCodeAndPreviewLink:
    """Get the Python code and preview link for a Vizro model object."""
    # Get the Python code
    python_code = model_object._to_python()
    # Add imports after the first empty line
    lines = python_code.splitlines()
    for i, line in enumerate(lines):
        if not line.strip():
            # Found first empty line, insert imports here
            imports_to_add = [
                "from vizro import Vizro",
                "import pandas as pd",
                "from vizro.managers import data_manager",
            ]
            # Slice assignment inserts without overwriting any existing line.
            lines[i:i] = imports_to_add
            break
    python_code = "\n".join(lines)
    # Prepare data loading code
    data_loading_code = "\n".join(
        f'data_manager["{info.file_name}"] = {info.read_function_string}("{info.file_path_or_url}")'
        for info in data_infos
    )
    # Patterns to identify the data manager section
    data_manager_start_marker = "####### Data Manager Settings #####"
    data_manager_end_marker = "########### Model code ############"
    # Replace everything between the markers with our data loading code
    # DOTALL lets ".*?" span lines; non-greedy keeps it to a single section.
    # ("#" is a literal in regex, so the markers need no escaping.)
    pattern = re.compile(f"{data_manager_start_marker}.*?{data_manager_end_marker}", re.DOTALL)
    replacement = f"{data_manager_start_marker}\n{data_loading_code}\n\n{data_manager_end_marker}"
    python_code = pattern.sub(replacement, python_code)
    # Add final run line
    python_code += "\n\nVizro().build(model).run()"
    pycafe_url = create_pycafe_url(python_code)
    return VizroCodeAndPreviewLink(python_code=python_code, pycafe_url=pycafe_url)
|
Get the Python code and preview link for a Vizro model object.
|
get_python_code_and_preview_link
|
python
|
mckinsey/vizro
|
vizro-mcp/src/vizro_mcp/_utils/utils.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/src/vizro_mcp/_utils/utils.py
|
Apache-2.0
|
def dashboard_config_validation_result() -> ValidationResults:
    """Fixture for a dashboard configuration validation result."""
    # Expected tool output for the valid dashboard config fixture. pycafe_url is
    # a truncated placeholder: tests compare only the URL prefix, not the hash.
    return ValidationResults(
        valid=True,
        message="Configuration is valid for Dashboard!",
        python_code="""############ Imports ##############
import vizro.models as vm
from vizro import Vizro
import pandas as pd
from vizro.managers import data_manager
########### Model code ############
model = vm.Dashboard(
    pages=[
        vm.Page(
            id="test_page",
            components=[vm.Card(id="test_card", type="card", text="Test content")],
            title="Test Page",
        )
    ],
    title="Test Dashboard",
)
Vizro().build(model).run()""",
        pycafe_url="https://py.cafe/snippet/vizro/v1?c=H4sIAFLGG...",
        browser_opened=False,
    )
|
Fixture for a dashboard configuration validation result.
|
dashboard_config_validation_result
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def graph_dashboard_config() -> dict[str, Any]:
    """Fixture for a dashboard configuration with a scatter graph."""
    # Built bottom-up: figure -> component -> page -> dashboard.
    scatter_figure = {
        "_target_": "scatter",
        "data_frame": "iris_data",
        "x": "sepal_length",
        "y": "sepal_width",
        "color": "species",
        "title": "Iris Scatter Plot",
    }
    page = {
        "id": "graph_page",
        "title": "Scatter Graph Page",
        "components": [{"id": "scatter_graph", "type": "graph", "figure": scatter_figure}],
    }
    return {"title": "Graph Dashboard", "pages": [page]}
|
Fixture for a dashboard configuration with a scatter graph.
|
graph_dashboard_config
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def graph_dashboard_validation_result() -> ValidationResults:
    """Fixture for a dashboard configuration with graph validation result."""
    # Expected tool output for graph_dashboard_config with iris metadata.
    # pycafe_url is a placeholder: tests compare only the URL prefix.
    return ValidationResults(
        valid=True,
        message="Configuration is valid for Dashboard!",
        python_code="""############ Imports ##############
import vizro.plotly.express as px
import vizro.models as vm
from vizro import Vizro
import pandas as pd
from vizro.managers import data_manager
####### Data Manager Settings #####
data_manager["iris_data"] = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/iris-id.csv")
########### Model code ############
model = vm.Dashboard(
    pages=[
        vm.Page(
            id="graph_page",
            components=[
                vm.Graph(
                    id="scatter_graph",
                    type="graph",
                    figure=px.scatter(
                        data_frame="iris_data",
                        x="sepal_length",
                        y="sepal_width",
                        color="species",
                        title="Iris Scatter Plot",
                    ),
                )
            ],
            title="Scatter Graph Page",
        )
    ],
    title="Graph Dashboard",
)
Vizro().build(model).run()""",
        pycafe_url="https://py.cafe/snippet/vizro/v1?c=example-hash",
        browser_opened=False,
    )
|
Fixture for a dashboard configuration with graph validation result.
|
graph_dashboard_validation_result
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def invalid_chart_plan() -> dict[str, Any]:
    """Fixture for an invalid chart plan."""
    # Presumably invalid because the function is named scatter_chart rather
    # than the required custom_chart - verify against the ChartPlan validator.
    return {
        "chart_type": "scatter",
        "imports": ["import pandas as pd", "import plotly.express as px"],
        "chart_code": """def scatter_chart(data_frame):
    return px.scatter(data_frame, x="sepal_length", y="sepal_width", color="species", title="Iris Scatter Plot")""",
    }
|
Fixture for an invalid chart plan.
|
invalid_chart_plan
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def chart_plan_validation_result() -> ValidationResults:
    """Fixture for a chart plan validation result."""
    # Expected tool output for a valid chart plan; pycafe_url is a truncated
    # placeholder (tests compare only the URL prefix, not the payload).
    return ValidationResults(
        valid=True,
        message="Chart only dashboard created successfully!",
        python_code="""@capture('graph')
def custom_chart(data_frame):
    return px.scatter(data_frame, x="sepal_length", y="sepal_width", color="species", title="Iris Scatter Plot")""",
        pycafe_url="https://py.cafe/snippet/vizro/v1?c=...",
        browser_opened=False,
    )
|
Fixture for a chart plan validation result.
|
chart_plan_validation_result
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_successful_validation(
    self, valid_dashboard_config: dict[str, Any], dashboard_config_validation_result: ValidationResults
) -> None:
    """Test successful validation of a dashboard configuration."""
    result = validate_model_config(dashboard_config=valid_dashboard_config, data_infos=[], auto_open=False)
    expected = dashboard_config_validation_result
    # pycafe_url embeds a compressed payload, so only its prefix is stable;
    # compare every other field exactly.
    for field in ("valid", "message", "python_code", "browser_opened"):
        assert getattr(result, field) == getattr(expected, field)
    assert result.pycafe_url is not None
    assert result.pycafe_url.startswith("https://py.cafe/snippet/vizro/v1?c=")
|
Test successful validation of a dashboard configuration.
|
test_successful_validation
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_graph_dashboard_validation(
self,
graph_dashboard_config: dict[str, Any],
graph_dashboard_validation_result: ValidationResults,
iris_metadata: DFMetaData,
) -> None:
"""Test validation of a dashboard with a scatter graph component."""
result = validate_model_config(
dashboard_config=graph_dashboard_config, data_infos=[iris_metadata], auto_open=False
)
# Compare everything but the pycafe_url
assert result.valid == graph_dashboard_validation_result.valid
assert result.message == graph_dashboard_validation_result.message
assert result.python_code == graph_dashboard_validation_result.python_code
assert result.browser_opened == graph_dashboard_validation_result.browser_opened
# For the URL, just check it has the right format
assert result.pycafe_url is not None
assert result.pycafe_url.startswith("https://py.cafe/snippet/vizro/v1?c=")
|
Test validation of a dashboard with a scatter graph component.
|
test_graph_dashboard_validation
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_validation_error(self, valid_dashboard_config: dict[str, Any], iris_metadata: DFMetaData) -> None:
"""Test validation error for an invalid dashboard configuration."""
# Create an invalid config by removing a required field
invalid_config = valid_dashboard_config.copy()
invalid_config["titles"] = invalid_config.pop("title")
result = validate_model_config(dashboard_config=invalid_config, data_infos=[iris_metadata], auto_open=False)
assert result.valid is False
assert "Validation Error: 1 validation error for Dashboard" in result.message
assert result.python_code == ""
assert result.pycafe_url is None
assert result.browser_opened is False
|
Test validation error for an invalid dashboard configuration.
|
test_validation_error
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_successful_validation(
self,
valid_chart_plan: dict[str, Any],
iris_metadata: DFMetaData,
chart_plan_validation_result: ValidationResults,
) -> None:
"""Test successful validation of chart code."""
result = validate_chart_code(chart_config=valid_chart_plan, data_info=iris_metadata, auto_open=False)
# Compare everything but the pycafe_url
assert result.valid == chart_plan_validation_result.valid
assert result.message == chart_plan_validation_result.message
assert result.python_code == chart_plan_validation_result.python_code
assert result.browser_opened == chart_plan_validation_result.browser_opened
# For the URL, just check it has the right format
assert result.pycafe_url is not None
assert result.pycafe_url.startswith("https://py.cafe/snippet/vizro/v1?c=")
|
Test successful validation of chart code.
|
test_successful_validation
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_validation_error(
self,
invalid_chart_plan: dict[str, Any],
iris_metadata: DFMetaData,
) -> None:
"""Test validation error for an invalid chart plan."""
result = validate_chart_code(chart_config=invalid_chart_plan, data_info=iris_metadata, auto_open=False)
assert result.valid is False
assert result.python_code == ""
assert result.pycafe_url is None
assert result.browser_opened is False
assert "Validation Error: 1 validation error for ChartPlan" in result.message
|
Test validation error for an invalid chart plan.
|
test_validation_error
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_model_json_schema(self, model_name: str, model_class: type) -> None:
"""Test getting JSON schema for various models."""
schema = get_model_json_schema(model_name=model_name)
# Get the schema directly from the model class
expected_schema = model_class.model_json_schema()
# Compare the schemas
assert schema == expected_schema
|
Test getting JSON schema for various models.
|
test_model_json_schema
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def test_nonexistent_model(self) -> None:
"""Test getting schema for a nonexistent model."""
schema = get_model_json_schema("NonExistentModel")
assert isinstance(schema, dict)
assert "error" in schema
assert "not found" in schema["error"]
|
Test getting schema for a nonexistent model.
|
test_nonexistent_model
|
python
|
mckinsey/vizro
|
vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-mcp/tests/unit/vizro_mcp/test_server.py
|
Apache-2.0
|
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
# Download the model weights
if not os.path.exists(MODEL_CACHE):
download_weights(MODEL_URL, MODEL_CACHE)
# Soft links for the auxiliary models
os.system("mkdir -p ~/.cache/torch/hub/checkpoints")
os.system(
"ln -s $(pwd)/checkpoints/auxiliary/vgg16-397923af.pth ~/.cache/torch/hub/checkpoints/vgg16-397923af.pth"
)
|
Load the model into memory to make running multiple predictions efficient
|
setup
|
python
|
bytedance/LatentSync
|
predict.py
|
https://github.com/bytedance/LatentSync/blob/master/predict.py
|
Apache-2.0
|
def predict(
self,
video: Path = Input(description="Input video", default=None),
audio: Path = Input(description="Input audio to ", default=None),
guidance_scale: float = Input(description="Guidance scale", ge=1, le=3, default=2.0),
inference_steps: int = Input(description="Inference steps", ge=20, le=50, default=20),
seed: int = Input(description="Set to 0 for Random seed", default=0),
) -> Path:
"""Run a single prediction on the model"""
if seed <= 0:
seed = int.from_bytes(os.urandom(2), "big")
print(f"Using seed: {seed}")
video_path = str(video)
audio_path = str(audio)
config_path = "configs/unet/stage2.yaml"
ckpt_path = "checkpoints/latentsync_unet.pt"
output_path = "/tmp/video_out.mp4"
# Run the following command:
os.system(
f"python -m scripts.inference --unet_config_path {config_path} --inference_ckpt_path {ckpt_path} --guidance_scale {str(guidance_scale)} --video_path {video_path} --audio_path {audio_path} --video_out_path {output_path} --seed {seed} --inference_steps {inference_steps}"
)
return Path(output_path)
|
Run a single prediction on the model
|
predict
|
python
|
bytedance/LatentSync
|
predict.py
|
https://github.com/bytedance/LatentSync/blob/master/predict.py
|
Apache-2.0
|
def resnet50_backbone(lda_out_channels, in_chn, pretrained=False, **kwargs):
"""Constructs a ResNet-50 model_hyper.
Args:
pretrained (bool): If True, returns a model_hyper pre-trained on ImageNet
"""
model = ResNetBackbone(lda_out_channels, in_chn, Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
save_model = model_zoo.load_url(model_urls['resnet50'])
model_dict = model.state_dict()
state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}
model_dict.update(state_dict)
model.load_state_dict(model_dict)
else:
model.apply(weights_init_xavier)
return model
|
Constructs a ResNet-50 model_hyper.
Args:
pretrained (bool): If True, returns a model_hyper pre-trained on ImageNet
|
resnet50_backbone
|
python
|
bytedance/LatentSync
|
eval/hyper_iqa.py
|
https://github.com/bytedance/LatentSync/blob/master/eval/hyper_iqa.py
|
Apache-2.0
|
def nms_(dets, thresh):
"""
Courtesy of Ross Girshick
[https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py]
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1) * (y2 - y1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(int(i))
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return np.array(keep).astype(np.int32)
|
Courtesy of Ross Girshick
[https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py]
|
nms_
|
python
|
bytedance/LatentSync
|
eval/detectors/s3fd/box_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/eval/detectors/s3fd/box_utils.py
|
Apache-2.0
|
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
|
Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
|
decode
|
python
|
bytedance/LatentSync
|
eval/detectors/s3fd/box_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/eval/detectors/s3fd/box_utils.py
|
Apache-2.0
|
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
scores: (tensor) The class predscores for the img, Shape:[num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
"""
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep, 0
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
with warnings.catch_warnings():
# Ignore UserWarning within this block
warnings.simplefilter("ignore", category=UserWarning)
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w * h
# IoU = i / (area(a) + area(b) - i)
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
union = (rem_areas - inter) + area[i]
IoU = inter / union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
|
Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
scores: (tensor) The class predscores for the img, Shape:[num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
|
nms
|
python
|
bytedance/LatentSync
|
eval/detectors/s3fd/box_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/eval/detectors/s3fd/box_utils.py
|
Apache-2.0
|
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
bytedance/LatentSync
|
latentsync/models/unet.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/models/unet.py
|
Apache-2.0
|
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor = None,
class_labels: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
# support controlnet
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet3DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layears).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# time
timesteps = timestep
if not torch.is_tensor(timesteps):
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
# pre-process
sample = self.conv_in(sample)
# down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
)
else:
sample, res_samples = downsample_block(
hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states
)
down_block_res_samples += res_samples
# support controlnet
down_block_res_samples = list(down_block_res_samples)
if down_block_additional_residuals is not None:
for i, down_block_additional_residual in enumerate(down_block_additional_residuals):
if down_block_additional_residual.dim() == 4: # boardcast
down_block_additional_residual = down_block_additional_residual.unsqueeze(2)
down_block_res_samples[i] = down_block_res_samples[i] + down_block_additional_residual
# mid
sample = self.mid_block(
sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask
)
# support controlnet
if mid_block_additional_residual is not None:
if mid_block_additional_residual.dim() == 4: # boardcast
mid_block_additional_residual = mid_block_additional_residual.unsqueeze(2)
sample = sample + mid_block_additional_residual
# up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
upsample_size=upsample_size,
attention_mask=attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
encoder_hidden_states=encoder_hidden_states,
)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet3DConditionOutput(sample=sample)
|
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
|
forward
|
python
|
bytedance/LatentSync
|
latentsync/models/unet.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/models/unet.py
|
Apache-2.0
|
def get_random_clip_from_video(self, idx: int) -> tuple:
'''
Sample a random clip starting index from the video.
Args:
idx: Index of the video.
'''
# Note that some videos may not contain enough frames, we skip those videos here.
while self._clips.clips[idx].shape[0] <= 0:
idx += 1
n_clip = self._clips.clips[idx].shape[0]
clip_id = random.randint(0, n_clip - 1)
return idx, clip_id
|
Sample a random clip starting index from the video.
Args:
idx: Index of the video.
|
get_random_clip_from_video
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/data_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/data_utils.py
|
Apache-2.0
|
def load_video_frames(self, dataroot: str) -> list:
'''
Loads all the video frames under the dataroot and returns a list of all the video frames.
Args:
dataroot: The root directory containing the video frames.
Returns:
A list of all the video frames.
'''
data_all = []
frame_list = os.walk(dataroot)
for _, meta in enumerate(frame_list):
root = meta[0]
try:
frames = sorted(meta[2], key=lambda item: int(item.split('.')[0].split('_')[-1]))
except:
print(meta[0], meta[2])
if len(frames) < max(0, self.sequence_length * self.sample_every_n_frames):
continue
frames = [
os.path.join(root, item) for item in frames
if is_image_file(item)
]
if len(frames) > max(0, self.sequence_length * self.sample_every_n_frames):
data_all.append(frames)
return data_all
|
Loads all the video frames under the dataroot and returns a list of all the video frames.
Args:
dataroot: The root directory containing the video frames.
Returns:
A list of all the video frames.
|
load_video_frames
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/data_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/data_utils.py
|
Apache-2.0
|
def getTensor(self, index: int) -> torch.Tensor:
'''
Returns a tensor of the video frames at the given index.
Args:
index: The index of the video frames to return.
Returns:
A BCTHW tensor in the range `[0, 1]` of the video frames at the given index.
'''
video = self.data_all[index]
video_len = len(video)
# load the entire video when sequence_length = -1, whiel the sample_every_n_frames has to be 1
if self.sequence_length == -1:
assert self.sample_every_n_frames == 1
start_idx = 0
end_idx = video_len
else:
n_frames_interval = self.sequence_length * self.sample_every_n_frames
start_idx = random.randint(0, video_len - n_frames_interval)
end_idx = start_idx + n_frames_interval
img = Image.open(video[0])
h, w = img.height, img.width
if h > w:
half = (h - w) // 2
cropsize = (0, half, w, half + w) # left, upper, right, lower
elif w > h:
half = (w - h) // 2
cropsize = (half, 0, half + h, h)
images = []
for i in range(start_idx, end_idx,
self.sample_every_n_frames):
path = video[i]
img = Image.open(path)
if h != w:
img = img.crop(cropsize)
img = img.resize(
(self.resolution, self.resolution),
Image.ANTIALIAS)
img = np.asarray(img, dtype=np.float32)
img /= 255.
img_tensor = preprocess_image(img).unsqueeze(0)
images.append(img_tensor)
video_clip = torch.cat(images).permute(3, 0, 1, 2)
return video_clip
|
Returns a tensor of the video frames at the given index.
Args:
index: The index of the video frames to return.
Returns:
A BCTHW tensor in the range `[0, 1]` of the video frames at the given index.
|
getTensor
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/data_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/data_utils.py
|
Apache-2.0
|
def set_num_features(self, num_features: int):
'''
Set the number of features diminsions.
Args:
num_features: Number of features diminsions.
'''
if self.num_features is not None:
assert num_features == self.num_features
else:
self.num_features = num_features
self.all_features = []
self.raw_mean = np.zeros([num_features], dtype=np.float64)
self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)
|
Set the number of features diminsions.
Args:
num_features: Number of features diminsions.
|
set_num_features
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/metric_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/metric_utils.py
|
Apache-2.0
|
def append(self, x: np.ndarray):
'''
Add the newly computed features to the list. Update the mean and covariance.
Args:
x: New features to record.
'''
x = np.asarray(x, dtype=np.float32)
assert x.ndim == 2
if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items):
if self.num_items >= self.max_items:
return
x = x[:self.max_items - self.num_items]
self.set_num_features(x.shape[1])
self.num_items += x.shape[0]
if self.capture_all:
self.all_features.append(x)
if self.capture_mean_cov:
x64 = x.astype(np.float64)
self.raw_mean += x64.sum(axis=0)
self.raw_cov += x64.T @ x64
|
Add the newly computed features to the list. Update the mean and covariance.
Args:
x: New features to record.
|
append
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/metric_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/metric_utils.py
|
Apache-2.0
|
def append_torch(self, x: torch.Tensor, rank: int, num_gpus: int):
'''
Add the newly computed PyTorch features to the list. Update the mean and covariance.
Args:
x: New features to record.
rank: Rank of the current GPU.
num_gpus: Total number of GPUs.
'''
assert isinstance(x, torch.Tensor) and x.ndim == 2
assert 0 <= rank < num_gpus
if num_gpus > 1:
ys = []
for src in range(num_gpus):
y = x.clone()
torch.distributed.broadcast(y, src=src)
ys.append(y)
x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples
self.append(x.cpu().numpy())
|
Add the newly computed PyTorch features to the list. Update the mean and covariance.
Args:
x: New features to record.
rank: Rank of the current GPU.
num_gpus: Total number of GPUs.
|
append_torch
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/metric_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/metric_utils.py
|
Apache-2.0
|
def get_all(self) -> np.ndarray:
'''
Get all the stored features as NumPy Array.
Returns:
Concatenation of the stored features.
'''
assert self.capture_all
return np.concatenate(self.all_features, axis=0)
|
Get all the stored features as NumPy Array.
Returns:
Concatenation of the stored features.
|
get_all
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/metric_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/metric_utils.py
|
Apache-2.0
|
def get_mean_cov(self) -> Tuple[np.ndarray, np.ndarray]:
'''
Get the mean and covariance of the stored features.
Returns:
Mean and covariance of the stored features.
'''
assert self.capture_mean_cov
mean = self.raw_mean / self.num_items
cov = self.raw_cov / self.num_items
cov = cov - np.outer(mean, mean)
return mean, cov
|
Get the mean and covariance of the stored features.
Returns:
Mean and covariance of the stored features.
|
get_mean_cov
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/metric_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/metric_utils.py
|
Apache-2.0
|
def load(pkl_file: str) -> 'FeatureStats':
'''
Load the features and statistics from a pickle file.
Args:
pkl_file: Path to the pickle file.
'''
with open(pkl_file, 'rb') as f:
s = pickle.load(f)
obj = FeatureStats(capture_all=s['capture_all'], max_items=s['max_items'])
obj.__dict__.update(s)
print('Loaded %d features from %s' % (obj.num_items, pkl_file))
return obj
|
Load the features and statistics from a pickle file.
Args:
pkl_file: Path to the pickle file.
|
load
|
python
|
bytedance/LatentSync
|
latentsync/trepa/utils/metric_utils.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/trepa/utils/metric_utils.py
|
Apache-2.0
|
def num_frames(length, fsize, fshift):
"""Compute number of time frames of spectrogram"""
pad = fsize - fshift
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M
|
Compute number of time frames of spectrogram
|
num_frames
|
python
|
bytedance/LatentSync
|
latentsync/utils/audio.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/utils/audio.py
|
Apache-2.0
|
def __getitem__(self, idx):
"""Get audio samples and video frame at `idx`.
Parameters
----------
idx : int or slice
The frame index, can be negative which means it will index backwards,
or slice of frame indices.
Returns
-------
(ndarray/list of ndarray, ndarray)
First element is samples of shape CxS or a list of length N containing samples of shape CxS,
where N is the number of frames, C is the number of channels,
S is the number of samples of the corresponding frame.
Second element is Frame of shape HxWx3 or batch of image frames with shape NxHxWx3,
where N is the length of the slice.
"""
assert self.__video_reader is not None and self.__audio_reader is not None
if isinstance(idx, slice):
return self.get_batch(range(*idx.indices(len(self.__video_reader))))
if idx < 0:
idx += len(self.__video_reader)
if idx >= len(self.__video_reader) or idx < 0:
raise IndexError("Index: {} out of bound: {}".format(idx, len(self.__video_reader)))
audio_start_idx, audio_end_idx = self.__video_reader.get_frame_timestamp(idx)
audio_start_idx = self.__audio_reader._time_to_sample(audio_start_idx)
audio_end_idx = self.__audio_reader._time_to_sample(audio_end_idx)
results = (self.__audio_reader[audio_start_idx:audio_end_idx], self.__video_reader[idx])
self.__video_reader.seek(0)
return results
|
Get audio samples and video frame at `idx`.
Parameters
----------
idx : int or slice
The frame index, can be negative which means it will index backwards,
or slice of frame indices.
Returns
-------
(ndarray/list of ndarray, ndarray)
First element is samples of shape CxS or a list of length N containing samples of shape CxS,
where N is the number of frames, C is the number of channels,
S is the number of samples of the corresponding frame.
Second element is Frame of shape HxWx3 or batch of image frames with shape NxHxWx3,
where N is the length of the slice.
|
__getitem__
|
python
|
bytedance/LatentSync
|
latentsync/utils/av_reader.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/utils/av_reader.py
|
Apache-2.0
|
def get_batch(self, indices):
    """Get entire batch of audio samples and video frames.
    Parameters
    ----------
    indices : list of integers
        A list of frame indices. If negative indices detected, the indices will be indexed from backward
    Returns
    -------
    (list of ndarray, ndarray)
        First element is a list of length N containing samples of shape CxS,
        where N is the number of frames, C is the number of channels,
        S is the number of samples of the corresponding frame.
        Second element is Frame of shape HxWx3 or batch of image frames with shape NxHxWx3,
        where N is the length of the slice.
    """
    assert self.__video_reader is not None and self.__audio_reader is not None
    indices = self._validate_indices(indices)
    audio_arr = []
    prev_video_idx = None
    prev_audio_end_idx = None
    for idx in list(indices):
        frame_start_time, frame_end_time = self.__video_reader.get_frame_timestamp(idx)
        # timestamp and sample conversion could have some error that could cause non-continuous audio
        # we detect if retrieving continuous frame and make the audio continuous
        # NOTE: compare against None explicitly — `if prev_video_idx` is falsy when the
        # previous frame index is 0, which broke audio continuity between frames 0 and 1.
        if prev_video_idx is not None and idx == prev_video_idx + 1:
            audio_start_idx = prev_audio_end_idx
        else:
            audio_start_idx = self.__audio_reader._time_to_sample(frame_start_time)
        audio_end_idx = self.__audio_reader._time_to_sample(frame_end_time)
        audio_arr.append(self.__audio_reader[audio_start_idx:audio_end_idx])
        prev_video_idx = idx
        prev_audio_end_idx = audio_end_idx
    results = (audio_arr, self.__video_reader.get_batch(indices))
    # Rewind the video reader so subsequent sequential reads start from the beginning.
    self.__video_reader.seek(0)
    return results
|
Get entire batch of audio samples and video frames.
Parameters
----------
indices : list of integers
A list of frame indices. If negative indices detected, the indices will be indexed from backward
Returns
-------
(list of ndarray, ndarray)
First element is a list of length N containing samples of shape CxS,
where N is the number of frames, C is the number of channels,
S is the number of samples of the corresponding frame.
Second element is Frame of shape HxWx3 or batch of image frames with shape NxHxWx3,
where N is the length of the slice.
|
get_batch
|
python
|
bytedance/LatentSync
|
latentsync/utils/av_reader.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/utils/av_reader.py
|
Apache-2.0
|
def _validate_indices(self, indices):
"""Validate int64 integers and convert negative integers to positive by backward search"""
assert self.__video_reader is not None and self.__audio_reader is not None
indices = np.array(indices, dtype=np.int64)
# process negative indices
indices[indices < 0] += len(self.__video_reader)
if not (indices >= 0).all():
raise IndexError("Invalid negative indices: {}".format(indices[indices < 0] + len(self.__video_reader)))
if not (indices < len(self.__video_reader)).all():
raise IndexError("Out of bound indices: {}".format(indices[indices >= len(self.__video_reader)]))
return indices
|
Validate int64 integers and convert negative integers to positive by backward search
|
_validate_indices
|
python
|
bytedance/LatentSync
|
latentsync/utils/av_reader.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/utils/av_reader.py
|
Apache-2.0
|
def cuda_to_int(cuda_str: str) -> int:
    """Convert a device string of the form "cuda:X" to the integer index X."""
    # Bare "cuda" means the default CUDA device, i.e. index 0.
    if cuda_str == "cuda":
        return 0
    dev = torch.device(cuda_str)
    if dev.type == "cuda":
        return dev.index
    raise ValueError(f"Device type must be 'cuda', got: {dev.type}")
|
Convert the string with format "cuda:X" to integer X.
|
cuda_to_int
|
python
|
bytedance/LatentSync
|
latentsync/utils/face_detector.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/utils/face_detector.py
|
Apache-2.0
|
def get_sliced_feature(self, feature_array, vid_idx, fps=25):
    """
    Slice a dense window of audio features centred on a video frame index.
    :param feature_array: sequence of per-chunk feature tensors
    :param vid_idx: index of the video frame the window is centred on
    :param fps: video frame rate used to map the frame index onto the feature timeline
    :return: tuple of (features reshaped to (-1, embedding_dim), list of selected indices)
    """
    total = len(feature_array)
    # The feature timeline runs at 50 chunks per second (hence the * 50 / fps mapping).
    center_idx = int(vid_idx * 50 / fps)
    window_start = center_idx - self.audio_feat_length[0] * 2
    window_end = center_idx + (self.audio_feat_length[1] + 1) * 2
    # Clamp every position into [0, total - 1]; out-of-range positions repeat the border chunk.
    selected_idx = [min(total - 1, max(0, pos)) for pos in range(window_start, window_end)]
    chunks = [feature_array[pos] for pos in selected_idx]
    selected_feature = torch.cat(chunks, dim=0).reshape(-1, self.embedding_dim)  # 50*384
    return selected_feature, selected_idx
|
Get sliced features based on a given index
:param feature_array:
:param start_idx: the start index of the feature
:param audio_feat_length:
:return:
|
get_sliced_feature
|
python
|
bytedance/LatentSync
|
latentsync/whisper/audio2feature.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/audio2feature.py
|
Apache-2.0
|
def get_sliced_feature_sparse(self, feature_array, vid_idx, fps=25):
    """
    Get sliced features based on a given index, sampling sparsely: for each
    surrounding video frame offset dt in [-audio_feat_length[0], audio_feat_length[1]],
    one pair of feature chunks is taken (rather than a dense window).
    :param feature_array: array of per-chunk audio features; the * 50 / fps mapping
        implies 50 feature chunks per second of audio
    :param vid_idx: index of the video frame to centre the slice on
    :param fps: video frame rate used to map frame indices to feature indices
    :return: tuple of (features tensor reshaped to (-1, embedding_dim),
        list of the selected feature indices, two entries per dt)
    """
    length = len(feature_array)
    selected_feature = []
    selected_idx = []
    for dt in range(-self.audio_feat_length[0], self.audio_feat_length[1] + 1):
        # Map the (offset) video frame onto the 50 Hz feature timeline.
        left_idx = int((vid_idx + dt) * 50 / fps)
        if left_idx < 1 or left_idx > length - 1:
            # Near the borders the [left_idx - 1, left_idx] pair is unavailable:
            # clamp to a valid single chunk and duplicate it so the output still
            # contributes exactly 2 rows (and 2 indices) for this dt.
            left_idx = max(0, left_idx)
            left_idx = min(length - 1, left_idx)
            x = feature_array[left_idx]
            x = x[np.newaxis, :, :]
            x = np.repeat(x, 2, axis=0)
            selected_feature.append(x)
            selected_idx.append(left_idx)
            selected_idx.append(left_idx)
        else:
            # In range: take the previous chunk and the current chunk together.
            x = feature_array[left_idx - 1 : left_idx + 1]
            selected_feature.append(x)
            selected_idx.append(left_idx - 1)
            selected_idx.append(left_idx)
    selected_feature = np.concatenate(selected_feature, axis=0)
    selected_feature = selected_feature.reshape(-1, self.embedding_dim)  # 50*384
    selected_feature = torch.from_numpy(selected_feature)
    return selected_feature, selected_idx
|
Get sliced features based on a given index
:param feature_array:
:param start_idx: the start index of the feature
:param audio_feat_length:
:return:
|
get_sliced_feature_sparse
|
python
|
bytedance/LatentSync
|
latentsync/whisper/audio2feature.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/audio2feature.py
|
Apache-2.0
|
def load_audio(file: str, sr: int = SAMPLE_RATE):
    """
    Read an audio file as a mono float32 waveform, resampling to *sr* if needed.
    Parameters
    ----------
    file: str
        The audio file to open
    sr: int
        The sample rate to resample the audio if necessary
    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """
    # Decode via an ffmpeg subprocess, down-mixing to mono and resampling on the fly.
    # Requires the ffmpeg CLI and the `ffmpeg-python` package.
    try:
        stream = ffmpeg.input(file, threads=0).output(
            "-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr
        )
        raw, _ = stream.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as err:
        raise RuntimeError(f"Failed to load audio: {err.stderr.decode()}") from err
    # 16-bit PCM -> float32 in [-1, 1).
    samples = np.frombuffer(raw, np.int16).flatten()
    return samples.astype(np.float32) / 32768.0
|
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
|
load_audio
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/audio.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/audio.py
|
Apache-2.0
|
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """
    Pad (with zeros) or trim the audio array along *axis* to exactly *length*
    samples, as expected by the encoder. Accepts both torch tensors and NumPy
    arrays; an input that already has the requested length is returned as-is.
    """
    if torch.is_tensor(array):
        if array.shape[axis] > length:
            # Build the index on the tensor's own device; a CPU-made index
            # against a CUDA tensor raises a device-mismatch error.
            array = array.index_select(
                dim=axis, index=torch.arange(length, device=array.device)
            )
        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            # F.pad expects flat (left, right) pairs starting from the LAST dim,
            # hence the reversed flattening of pad_widths.
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
    else:
        if array.shape[axis] > length:
            array = array.take(indices=range(length), axis=axis)
        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = np.pad(array, pad_widths)
    return array
|
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
|
pad_or_trim
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/audio.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/audio.py
|
Apache-2.0
|
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
    """
    Load the mel filterbank matrix for projecting an STFT onto a Mel spectrogram.
    Decouples the librosa dependency; the bundled file was produced with:
        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
        )
    """
    # Only the 80-bin bank ships with the package.
    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
    filters_path = os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
    with np.load(filters_path) as archive:
        return torch.from_numpy(archive[f"mel_{n_mels}"]).to(device)
|
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
|
mel_filters
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/audio.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/audio.py
|
Apache-2.0
|
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
    """
    Compute the log-Mel spectrogram of an audio waveform.
    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
    n_mels: int
        The number of Mel-frequency filters, only 80 is supported
    Returns
    -------
    torch.Tensor, shape = (80, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    # Normalize the input to a torch tensor (a str is never a tensor, so the
    # load happens before the conversion check).
    if isinstance(audio, str):
        audio = load_audio(audio)
    if not torch.is_tensor(audio):
        audio = torch.from_numpy(audio)

    hann = torch.hann_window(N_FFT).to(audio.device)
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=hann, return_complex=True)
    # Drop the final frame, then take the power spectrum.
    power = stft[:, :-1].abs() ** 2

    mel_spec = mel_filters(audio.device, n_mels) @ power
    # Log-compress, clip the dynamic range to 8 dB below the peak, and rescale.
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    return (log_spec + 4.0) / 4.0
|
Compute the log-Mel spectrogram of the given audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
|
log_mel_spectrogram
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/audio.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/audio.py
|
Apache-2.0
|
def detect_language(model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None) -> Tuple[Tensor, List[dict]]:
    """
    Detect the spoken language of the given audio.

    Runs a single decoder step with only the startoftranscript token and reads the
    logits of the language tokens, so the kv-caching of the main decode loop is
    left untouched.

    Returns
    -------
    language_tokens : Tensor, shape = (n_audio,)
        ids of the most probable language tokens (the token following startoftranscript).
    language_probs : List[Dict[str, float]], length = n_audio
        one dict per audio input mapping each language code to its probability.
    """
    if tokenizer is None:
        tokenizer = get_tokenizer(model.is_multilingual)
    if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:
        raise ValueError("This model doesn't have language tokens so it can't perform lang id")

    unbatched = mel.ndim == 2
    if unbatched:
        mel = mel.unsqueeze(0)

    # If the caller already passed encoded audio features, skip the encoder pass.
    if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
        mel = model.encoder(mel)

    # One forward pass with just the startoftranscript token per audio input.
    n_audio = mel.shape[0]
    sot_tokens = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device)  # [n_audio, 1]
    logits = model.logits(sot_tokens, mel)[:, 0]

    # Suppress everything that is not a language token before taking the argmax.
    non_language = torch.ones(logits.shape[-1], dtype=torch.bool)
    non_language[list(tokenizer.all_language_tokens)] = False
    logits[:, non_language] = -np.inf

    language_tokens = logits.argmax(dim=-1)
    token_probs = logits.softmax(dim=-1).cpu()
    language_probs = [
        {
            code: token_probs[i, tok].item()
            for tok, code in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)
        }
        for i in range(n_audio)
    ]

    if unbatched:
        return language_tokens[0], language_probs[0]
    return language_tokens, language_probs
|
Detect the spoken language in the audio, and return them as list of strings, along with the ids
of the most probable language tokens and the probability distribution over all language tokens.
This is performed outside the main decode loop in order to not interfere with kv-caching.
Returns
-------
language_tokens : Tensor, shape = (n_audio,)
ids of the most probable language tokens, which appears after the startoftranscript token.
language_probs : List[Dict[str, float]], length = n_audio
list of dictionaries containing the probability distribution over all languages.
|
detect_language
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/decoding.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/decoding.py
|
Apache-2.0
|
def finalize(
    self, tokens: Tensor, sum_logprobs: Tensor
) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]:
    """Finalize the search and return the final candidate sequences.

    Abstract: subclasses must override this; the base implementation always raises.

    Parameters
    ----------
    tokens : Tensor, shape = (n_audio, n_group, current_sequence_length)
        every token in the context so far, including the prefix and sot_sequence
    sum_logprobs : Tensor, shape = (n_audio, n_group)
        cumulative log probability of each sequence

    Returns
    -------
    tokens : Sequence[Sequence[Tensor]], length = n_audio
        candidate token sequences for each audio input
    sum_logprobs : List[List[float]], length = n_audio
        cumulative log probabilities matching the candidates above
    """
    raise NotImplementedError
|
Finalize search and return the final candidate sequences
Parameters
----------
tokens : Tensor, shape = (n_audio, n_group, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence
sum_logprobs : Tensor, shape = (n_audio, n_group)
cumulative log probabilities for each sequence
Returns
-------
tokens : Sequence[Sequence[Tensor]], length = n_audio
sequence of Tensors containing candidate token sequences, for each audio input
sum_logprobs : List[List[float]], length = n_audio
sequence of cumulative log probabilities corresponding to the above
|
finalize
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/decoding.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/decoding.py
|
Apache-2.0
|
def decode(model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions()) -> Union[DecodingResult, List[DecodingResult]]:
    """
    Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
    Parameters
    ----------
    model: Whisper
        the Whisper model instance
    mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
        A tensor containing the Mel spectrogram(s)
    options: DecodingOptions
        A dataclass that contains all necessary options for decoding 30-second segments
    Returns
    -------
    result: Union[DecodingResult, List[DecodingResult]]
        The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
    """
    # A 2-D input is a single unbatched spectrogram; decode expects a batch.
    batched = mel.ndim != 2
    if not batched:
        mel = mel.unsqueeze(0)
    outputs = DecodingTask(model, options).run(mel)
    # Unwrap the singleton batch for unbatched callers.
    return outputs if batched else outputs[0]
|
Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
Parameters
----------
model: Whisper
the Whisper model instance
mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
A tensor containing the Mel spectrogram(s)
options: DecodingOptions
A dataclass that contains all necessary options for decoding 30-second segments
Returns
-------
result: Union[DecodingResult, List[DecodingResult]]
The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
|
decode
|
python
|
bytedance/LatentSync
|
latentsync/whisper/whisper/decoding.py
|
https://github.com/bytedance/LatentSync/blob/master/latentsync/whisper/whisper/decoding.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.