licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 1676 | export list_guild_emojis,
get_guild_emoji,
create_guild_emoji,
modify_guild_emoji,
delete_guild_emoji
"""
list_guild_emojis(c::Client, guild::Integer) -> Vector{Emoji}
Get the [`Emoji`](@ref)s in a [`Guild`](@ref).
"""
function list_guild_emojis(c::Client, guild::Integer)
return Response{Vector{Emoji}}(c, :GET, "/guilds/$guild/emojis")
end
"""
get_guild_emoji(c::Client, guild::Integer, emoji::Integer) -> Emoji
Get an [`Emoji`](@ref) in a [`Guild`](@ref).
"""
function get_guild_emoji(c::Client, guild::Integer, emoji::Integer)
return Response{Emoji}(c, :GET, "/guilds/$guild/emojis/$emoji")
end
"""
create_guild_emoji(c::Client, guild::Integer; kwargs...) -> Emoji
Create an [`Emoji`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/emoji#create-guild-emoji).
"""
function create_guild_emoji(c::Client, guild::Integer; kwargs...)
return Response{Emoji}(c, :POST, "/guilds/$guild/emojis"; body=kwargs)
end
"""
modify_guild_emoji(c::Client, guild::Integer, emoji::Integer; kwargs...) -> Emoji
Edit an [`Emoji`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/emoji#modify-guild-emoji).
"""
function modify_guild_emoji(c::Client, guild::Integer, emoji::Integer; kwargs...)
return Response{Emoji}(c, :PATCH, "/guilds/$guild/emojis/$emoji"; body=kwargs)
end
"""
delete_guild_emoji(c::Client, guild::Integer, emoji::Integer)
Delete an [`Emoji`](@ref) from a [`Guild`](@ref).
"""
function delete_guild_emoji(c::Client, guild::Integer, emoji::Integer)
return Response(c, :DELETE, "/guilds/$guild/emojis/$emoji")
end
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 193 | include("audit_log.jl")
include("channel.jl")
include("emoji.jl")
include("guild.jl")
include("invite.jl")
include("user.jl")
include("voice.jl")
include("webhook.jl")
include("interaction.jl") | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 12880 | export create_guild,
get_guild,
modify_guild,
delete_guild,
get_guild_channels,
create_guild_channel,
modify_guild_channel_positions,
get_guild_member,
list_guild_members,
add_guild_member,
modify_guild_member,
modify_current_user_nick,
add_guild_member_role,
remove_guild_member_role,
remove_guild_member,
get_guild_bans,
get_guild_ban,
create_guild_ban,
remove_guild_ban,
get_guild_roles,
create_guild_role,
modify_guild_role_positions,
modify_guild_role,
delete_guild_role,
get_guild_prune_count,
begin_guild_prune,
get_guild_voice_regions,
get_guild_invites,
get_guild_integrations,
create_guild_integration,
modify_guild_integration,
delete_guild_integration,
sync_guild_integration,
get_guild_embed,
modify_guild_embed,
get_vanity_url,
get_guild_widget_image
"""
create_guild(c::Client; kwargs...) -> Guild
Create a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#create-guild).
"""
function create_guild(c::Client; kwargs...)
return Response{Guild}(c, :POST, "/guilds"; body=kwargs)
end
"""
get_guild(c::Client, guild::Integer) -> Guild
Get a [`Guild`](@ref).
"""
function get_guild(c::Client, guild::Integer)
return Response{Guild}(c, :GET, "/guilds/$guild")
end
"""
modify_guild(c::Client, guild::Integer; kwargs...) -> Guild
Edit a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild).
"""
function modify_guild(c::Client, guild::Integer; kwargs...)
return Response{Guild}(c, :PATCH, "/guilds/$guild"; body=kwargs)
end
"""
delete_guild(c::Client, guild::Integer)
Delete a [`Guild`](@ref).
"""
function delete_guild(c::Client, guild::Integer)
return Response(c, :DELETE, "/guilds/$guild")
end
"""
get_guild_channels(c::Client, guild::Integer) -> Vector{DiscordChannel}
Get the [`DiscordChannel`](@ref)s in a [`Guild`](@ref).
"""
function get_guild_channels(c::Client, guild::Integer)
return Response{Vector{DiscordChannel}}(c, :GET, "/guilds/$guild/channels")
end
"""
create_guild_channel(c::Client, guild::Integer; kwargs...) -> DiscordChannel
Create a [`DiscordChannel`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#create-guild-channel).
"""
function create_guild_channel(c::Client, guild::Integer; kwargs...)
return Response{DiscordChannel}(c, :POST, "/guilds/$guild/channels"; body=kwargs)
end
"""
modify_guild_channel_positions(c::Client, guild::Integer, positions...)
Modify the positions of [`DiscordChannel`](@ref)s in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild-channel-positions).
"""
function modify_guild_channel_positions(c::Client, guild::Integer, positions...)
return Response(c, :PATCH, "/guilds/$guild/channels"; body=positions)
end
"""
get_guild_member(c::Client, guild::Integer, user::Integer) -> Member
Get a [`Member`](@ref) in a [`Guild`](@ref).
"""
function get_guild_member(c::Client, guild::Integer, user::Integer)
return Response{Member}(c, :GET, "/guilds/$guild/members/$user")
end
"""
list_guild_members(c::Client, guild::Integer; kwargs...) -> Vector{Member}
Get a list of [`Member`](@ref)s in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#list-guild-members).
"""
function list_guild_members(c::Client, guild::Integer; kwargs...)
return Response{Vector{Member}}(c, :GET, "/guilds/$guild/members"; kwargs...)
end
"""
add_guild_member(c::Client; kwargs...) -> Member
Add a [`User`](@ref) to a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#add-guild-member).
"""
function add_guild_member(c::Client, guild::Integer, user::Integer; kwargs...)
return Response{Member}(c, :PUT, "/guilds/$guild/members/$user"; body=kwargs)
end
"""
modify_guild__member(c::Client, guild::Integer, user::Integer; kwargs...)
Modify a [`Member`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild-member).
"""
function modify_guild_member(c::Client, guild::Integer, user::Integer; kwargs...)
return Response(c, :PATCH, "/guilds/$guild/members/$user"; body=kwargs)
end
"""
modify_current_user_nick(c::Client, guild::Intger; kwargs...) -> String
Modify the [`Client`](@ref) user's nickname in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-current-user-nick).
"""
function modify_current_user_nick(c::Client, guild::Integer; kwargs...)
return Response{String}(c, :PATCH, "/guilds/$guild/members/@me/nick"; body=kwargs)
end
"""
add_guild_member_role(c::Client, guild::Integer, user::Integer, role::Integer)
Add a [`Role`](@ref) to a [`Member`](@ref).
"""
function add_guild_member_role(c::Client, guild::Integer, user::Integer, role::Integer)
return Response(c, :PUT, "/guilds/$guild/members/$user/roles/$role")
end
"""
remove_guild_member_role(c::Client, guild::Integer, user::Integer, role::Integer)
Remove a [`Role`](@ref) from a [`Member`](@ref).
"""
function remove_guild_member_role(c::Client, guild::Integer, user::Integer, role::Integer)
return Response(c, :DELETE, "/guilds/$guild/members/$user/roles/$role")
end
"""
remove_guild_member(c::Client, guild::Integer, user::Integer)
Kick a [`Member`](@ref) from a [`Guild`](@ref).
"""
function remove_guild_member(c::Client, guild::Integer, user::Integer)
return Response(c, :DELETE, "/guilds/$guild/members/$user")
end
"""
get_guild_bans(c::Client, guild::Integer) -> Vector{Ban}
Get a list of [`Ban`](@ref)s in a [`Guild`](@ref).
"""
function get_guild_bans(c::Client, guild::Integer)
return Response{Vector{Ban}}(c, :GET, "/guilds/$guild/bans")
end
"""
get_ban(c::Client, guild::Integer, user::Integer) -> Ban
Get a [`Ban`](@ref) in a [`Guild`](@ref).
"""
function get_guild_ban(c::Client, guild::Integer, user::Integer)
return Response{Ban}(c, :GET, "/guilds/$guild/bans/$user")
end
"""
create_guild_ban(c::Client, guild::Integer, user::Integer; kwargs...)
Ban a [`Member`](@ref) from a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#create-guild-ban).
"""
function create_guild_ban(c::Client, guild::Integer, user::Integer; kwargs...)
return Response(c, :PUT, "/guilds/$guild/bans/$user"; kwargs...)
end
"""
remove_guild_ban(c::Client, guild::Integer, user::Integer)
Unban a [`User`](@ref) from a [`Guild`](@ref).
"""
function remove_guild_ban(c::Client, guild::Integer, user::Integer)
return Response(c, :DELETE, "/guilds/$guild/bans/$user")
end
"""
get_guild_roles(c::Client, guild::Integer) -> Vector{Role}
Get a [`Guild`](@ref)'s [`Role`](@ref)s.
"""
function get_guild_roles(c::Client, guild::Integer)
return Response{Vector{Role}}(c, :GET, "/guilds/$guild/roles")
end
"""
create_guild_role(c::Client, guild::Integer; kwargs) -> Role
Create a [`Role`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#create-guild-role).
"""
function create_guild_role(c::Client, guild::Integer; kwargs...)
return Response{Role}(c, :POST, "/guilds/$guild/roles"; body=kwargs)
end
"""
modify_guild_role_positions(c::Client, guild::Integer, positions...) -> Vector{Role}
Modify the positions of [`Role`](@ref)s in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild-role-positions).
"""
function modify_guild_role_positions(c::Client, guild::Integer, positions...)
return Response{Vector{Role}}(c, :PATCH, "/guilds/$guild/roles"; body=positions)
end
"""
modify_guild_role(c::Client, guild::Integer, role::Integer; kwargs) -> Role
Modify a [`Role`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild-role).
"""
function modify_guild_role(c::Client, guild::Integer, role::Integer; kwargs...)
return Response{Role}(c, :PATCH, "/guilds/$guild/roles/$role"; body=kwargs)
end
"""
delete_guild_role(c::Client, guild::Integer, role::Integer)
Delete a [`Role`](@ref) from a [`Guild`](@ref).
"""
function delete_guild_role(c::Client, guild::Integer, role::Integer)
return Response(c, :DELETE, "/guilds/$guild/roles/$role")
end
"""
get_guild_prune_count(c::Client, guild::Integer; kwargs...) -> Dict
Get the number of [`Member`](@ref)s that would be removed from a [`Guild`](@ref) in a prune.
More details [here](https://discordapp.com/developers/docs/resources/guild#get-guild-prune-count).
"""
function get_guild_prune_count(c::Client, guild::Integer; kwargs...)
return Response{Dict}(c, :GET, "/guilds/$guild/prune"; kwargs...)
end
"""
begin_guild_prune(c::Client, guild::Integer; kwargs...) -> Dict
Begin pruning [`Member`](@ref)s from a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#begin-guild-prune).
"""
function begin_guild_prune(c::Client, guild::Integer; kwargs...)
return Response{Dict}(c, :POST, "/guilds/$guild/prune"; kwargs...)
end
"""
get_guild_voice_regions(c::Client, guild::Integer) -> Vector{VoiceRegion}
Get a list of [`VoiceRegion`](@ref)s for the [`Guild`](@ref).
"""
function get_guild_voice_regions(c::Client, guild::Integer)
return Response{Vector{VoiceRegion}}(c, :GET, "/guilds/$guild/regions")
end
"""
get_guild_invites(c::Client, guild::Integer) -> Vector{Invite}
Get a list of [`Invite`](@ref)s to a [`Guild`](@ref).
"""
function get_guild_invites(c::Client, guild::Integer)
return Response{Vector{Invite}}(c, :GET, "/guilds/$guild/invites")
end
"""
get_guild_integrations(c::Client, guild::Integer) -> Vector{Integration}
Get a list of [`Integration`](@ref)s for a [`Guild`](@ref).
"""
function get_guild_integrations(c::Client, guild::Integer)
return Response{Vector{Integration}}(c, :GET, "/guilds/$guild/integrations")
end
"""
create_guild_integration(c::Client, guild::Integer; kwargs...)
Create/attach an [`Integration`](@ref) to a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#create-guild-integration).
"""
function create_guild_integration(c::Client, guild::Integer; kwargs...)
return Response{Integration}(c, :POST, "/guilds/$guild/integrations"; body=kwargs)
end
"""
modify_guild_integration(c::Client, guild::Integer, integration::Integer; kwargs...)
Modify an [`Integration`](@ref) in a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild-integration).
"""
function modify_guild_integration(
c::Client,
guild::Integer,
integration::Integer;
kwargs...,
)
return Response(c, :PATCH, "/guilds/$guild/integrations/$integration"; body=kwargs)
end
"""
delete_guild_integration(c::Client, guild::Integer, integration::Integer)
Delete an [`Integration`](@ref) from a [`Guild`](@ref).
"""
function delete_guild_integration(c::Client, guild::Integer, integration::Integer)
return Response(c, :DELETE, "/guilds/$guild/integrations/$integration")
end
"""
sync_guild_integration(c::Client, guild::Integer, integration::Integer)
Sync an [`Integration`](@ref) in a [`Guild`](@ref).
"""
function sync_guild_integration(c::Client, guild::Integer, integration::Integer)
return Response(c, :POST, "/guilds/$guild/integrations/$integration/sync")
end
"""
get_guild_embed(c::Client, guild::Integer) -> GuildEmbed
Get a [`Guild`](@ref)'s [`GuildEmbed`](@ref).
"""
function get_guild_embed(c::Client, guild::Integer)
return Response{GuildEmbed}(c, :GET, "/guilds/$guild/embed")
end
"""
modify_guild_embed(c::Client, guild::Integer; kwargs...) -> GuildEmbed
Modify a [`Guild`](@ref)'s [`GuildEmbed`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/guild#modify-guild-embed).
"""
function modify_guild_embed(c::Client, guild::Integer; kwargs...)
return Response{GuildEmbed}(c, :PATCH, "/guilds/$guild/embed"; body=kwargs)
end
"""
get_vanity_url(c::Client, guild::Integer) -> Invite
Get a [`Guild`](@ref)'s vanity URL, if it supports that feature.
"""
function get_vanity_url(c::Client, guild::Integer)
return Response{Invite}(c, :GET, "/guilds/$guild/vanity-url")
end
"""
get_guild_widget_image(c::Client, guild::Integer; kwargs...) -> Vector{UInt8}
Get a [`Guild`](@ref)'s widget image in PNG format.
More details [here](https://discordapp.com/developers/docs/resources/guild#get-guild-widget-image).
"""
function get_guild_widget_image(c::Client, guild::Integer; kwargs...)
return Response{Vector{UInt8}}(c, :GET, "/guilds/$guild/widget.png"; kwargs...)
end
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 4816 | """
create_application_command(c::Client; kwargs...) -> ApplicationCommand
Creates a global [`ApplicationCommand`](@ref).
"""
function create_application_command(c::Client; kwargs...)
    # Bug fix: `appid` was interpolated into the URL without ever being defined,
    # causing an UndefVarError at runtime. Derive it from the client, exactly as
    # the guild-scoped method below does.
    appid = c.application_id
    return Response{ApplicationCommand}(c, :POST, "/applications/$appid/commands"; body=kwargs)
end
"""
create_application_command(c::Client, guild::Snowflake; kwargs...) -> ApplicationCommand
Creates a guild [`ApplicationCommand`](@ref).
"""
function create_application_command(c::Client, guild::Snowflake; kwargs...)
appid = c.application_id
return Response{ApplicationCommand}(c, :POST, "/applications/$appid/guilds/$guild/commands"; body=kwargs)
end
"""
get_application_commands(c::Client) -> Vector{ApplicationCommand}
Gets all global [`ApplicationCommand`](@ref)s for the logged in client.
"""
function get_application_commands(c::Client)
appid = c.application_id
return Response{Vector{ApplicationCommand}}(c, :GET, "/applications/$appid/commands")
end
"""
get_application_commands(c::Client, guild::Snowflake) -> Vector{ApplicationCommand}
Gets all guild [`ApplicationCommand`](@ref)s for the logged in client.
"""
function get_application_commands(c::Client, guild::Snowflake)
appid = c.application_id
return Response{Vector{ApplicationCommand}}(c, :GET, "/applications/$appid/guilds/$guild/commands")
end
"""
respond_to_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...) -> Message
Respond to an interaction with code 4.
"""
function respond_to_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...)
dict = Dict{Symbol, Any}(
:data => kwargs,
:type => 4,
)
return Response{Message}(c, :POST, "/interactions/$int_id/$int_token/callback"; body=dict)
end
"""
respond_to_interaction_with_a_modal(c::Client, int_id::Snowflake, int_token::String; kwargs...) -> Message
Respond to an interaction with code 9 (modal).
"""
function respond_to_interaction_with_a_modal(c::Client, int_id::Snowflake, int_token::String; kwargs...)
dict = Dict{Symbol, Any}(
:data => kwargs,
:type => 9,
)
return Response{Message}(c, :POST, "/interactions/$int_id/$int_token/callback"; body=dict)
end
"""
ack_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...)
Respond to an interaction with code 5.
"""
function ack_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...)
dict = Dict{Symbol, Any}(
:type => 5,
)
return Response(c, :POST, "/interactions/$int_id/$int_token/callback"; body=dict)
end
"""
update_ack_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...) -> Message
Respond to an interaction with code 6.
"""
function update_ack_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...)
dict = Dict{Symbol, Any}(
:type => 6,
)
return Response{Message}(c, :POST, "/interactions/$int_id/$int_token/callback"; body=dict)
end
"""
update_message_int(c::Client, int_id::Snowflake, int_token::String; kwargs...)
Respond to an interaction with code 7.
"""
function update_message_int(c::Client, int_id::Snowflake, int_token::String; kwargs...)
dict = Dict{Symbol, Any}(
:data => kwargs,
:type => 7,
)
return Response(c, :POST, "/interactions/$int_id/$int_token/callback"; body=dict)
end
"""
create_followup_message(c::Client, int_id::Snowflake, int_token::String; kwargs...) -> Message
Creates a followup message for an interaction.
"""
function create_followup_message(c::Client, int_id::Snowflake, int_token::String; kwargs...)
appid = c.application_id
return Response{Message}(c, :POST, "/webhooks/$appid/$int_token)"; body=kwargs)
end
"""
edit_interaction(c::Client, int_id::Snowflake, int_token::String; kwargs...)
Edit a followup message for an interaction.
"""
function edit_interaction(c::Client, int_token::String, mid::Snowflake; kwargs...)
appid = c.application_id
return Response(c, :PATCH, "/webhooks/$appid/$int_token/messages/$mid"; body=kwargs)
end
"""
bulk_overwrite_application_commands(c::Client, guild::Snowflake, cmds::Vector{ApplicationCommand}) -> Vector{ApplicationCommand}
Overwrites global [`ApplicationCommand`](@ref)s with the given cmds vector.
"""
function bulk_overwrite_application_commands(c::Client, guild::Snowflake, cmds::Vector{ApplicationCommand})
appid = c.application_id
return Response{Vector{ApplicationCommand}}(c, :PUT, "/applications/$appid/guilds/$guild/commands"; body=cmds)
end
"""
bulk_overwrite_application_commands(c::Client, guild::Snowflake, cmds::Vector{ApplicationCommand}) -> Vector{ApplicationCommand}
Overwrites guild [`ApplicationCommand`](@ref)s with the given cmds vector.
"""
function bulk_overwrite_application_commands(c::Client, cmds::Vector{ApplicationCommand})
appid = c.application_id
return Response{Vector{ApplicationCommand}}(c, :PUT, "/applications/$appid/commands"; body=cmds)
end | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 635 | export get_invite,
delete_invite
"""
get_invite(c::Client, invite::AbstractString; kwargs...} -> Invite
Get an [`Invite`](@ref) to a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/invite#get-invite).
"""
function get_invite(c::Client, invite::AbstractString; kwargs...)
return Response{Invite}(c, :GET, "/invites/$invite"; kwargs...)
end
"""
delete_invite(c::Client, invite::AbstractString) -> Invite
Delete an [`Invite`](@ref) to a [`Guild`](@ref).
"""
function delete_invite(c::Client, invite::AbstractString)
return Response{Invite}(c, :DELETE, "/invites/$invite")
end
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 1698 | export get_current_user,
get_user,
modify_current_user,
get_current_user_guilds,
leave_guild,
create_dm
"""
get_current_user(c::Client) -> User
Get the [`Client`](@ref) [`User`](@ref).
"""
function get_current_user(c::Client)
return Response{User}(c, :GET, "/users/@me")
end
"""
get_user(c::Client, user::Integer) -> User
Get a [`User`](@ref).
"""
function get_user(c::Client, user::Integer)
return Response{User}(c, :GET, "/users/$user")
end
"""
modify_current_user(c::Client; kwargs...) -> User
Modify the [`Client`](@ref) [`User`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/user#modify-current-user).
"""
function modify_current_user(c::Client; kwargs...)
return Response{User}(c, :PATCH, "/users/@me"; body=kwargs)
end
"""
get_user_guilds(c::Client; kwargs...) -> Vector{Guild}
Get a list of [`Guild`](@ref)s the [`Client`](@ref) [`User`](@ref) is a member of.
More details [here](https://discordapp.com/developers/docs/resources/user#get-current-user-guilds).
"""
function get_current_user_guilds(c::Client; kwargs...)
return Response{Vector{Guild}}(c, :GET, "/users/@me/guilds"; kwargs...)
end
"""
leave_guild(c::Client, guild::Integer)
Leave a [`Guild`](@ref).
"""
function leave_guild(c::Client, guild::Integer)
return Response(c, :DELETE, "/users/@me/guilds/$guild")
end
"""
create_dm(c::Client; kwargs...) -> DiscordChannel
Create a DM [`DiscordChannel`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/user#create-dm).
"""
function create_dm(c::Client; kwargs...)
return Response{DiscordChannel}(c, :POST, "/users/@me/channels"; body=kwargs)
end
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 294 | export list_voice_regions
"""
list_voice_regions(c::Client) -> Vector{VoiceRegion}
Get a list of the [`VoiceRegion`](@ref)s that can be used when creating [`Guild`](@ref)s.
"""
function list_voice_regions(c::Client)
return Response{Vector{VoiceRegion}}(c, :GET, "/voice/regions")
end
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 4876 | export create_webhook,
get_channel_webhooks,
get_guild_webhooks,
get_webhook,
get_webhook_with_token,
modify_webhook,
modify_webhook_with_token,
delete_webhook,
delete_webhook_with_token,
execute_webhook,
execute_slack_compatible_webhook,
execute_github_compatible_webhook
"""
create_webhook(c::Client, channel::Integer; kwargs...) -> Webhook
Create a [`Webhook`](@ref) in a [`DiscordChannel`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/webhook#create-webhook).
"""
function create_webhook(c::Client, channel::Integer; kwargs...)
return Response{Webhook}(c, :POST, "/channels/$channel/webhooks"; body=kwargs)
end
"""
get_channel_webhooks(c::Client, channel::Integer) -> Vector{Webhook}
Get a list of [`Webhook`](@ref)s in a [`DiscordChannel`](@ref).
"""
function get_channel_webhooks(c::Client, channel::Integer)
return Response{Vector{Webhook}}(c, :GET, "/channels/$channel/webhooks")
end
"""
get_guild_webhooks(c::Client, guild::Integer) -> Vector{Webhook}
Get a list of [`Webhook`](@ref)s in a [`Guild`](@ref).
"""
function get_guild_webhooks(c::Client, guild::Integer)
return Response{Vector{Webhook}}(c, :GET, "/guilds/$guild/webhooks")
end
"""
get_webhook(c::Client, webhook::Integer) -> Webhook
Get a [`Webhook`](@ref).
"""
function get_webhook(c::Client, webhook::Integer)
return Response{Webhook}(c, :GET, "/webhooks/$webhook")
end
"""
get_webhook_with_token(c::Client, webhook::Integer, token::AbstractString) -> Webhook
Get a [`Webhook`](@ref) with a token.
"""
function get_webhook_with_token(c::Client, webhook::Integer, token::AbstractString)
return Response{Webhook}(c, :GET, "/webhooks/$webhook/$token")
end
"""
modify_webhook(c::Client, webhook::Integer; kwargs...) -> Webhook
Modify a [`Webhook`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/webhook#modify-webhook).
"""
function modify_webhook(c::Client, webhook::Integer; kwargs...)
return Response{Webhook}(c, :PATCH, "/webhooks/$webhook"; body=kwargs)
end
"""
modify_webhook_with_token(
c::Client,
webhook::Integer,
token::AbstractString;
kwargs...,
) -> Webhook
Modify a [`Webhook`](@ref) with a token.
More details [here](https://discordapp.com/developers/docs/resources/webhook#modify-webhook).
"""
function modify_webhook_with_token(
c::Client,
webhook::Integer,
token::AbstractString;
kwargs...,
)
return Response{Webhook}(c, :PATCH, "/webhooks/$webhook/$token"; body=kwargs)
end
"""
delete_webhook(c::Client, webhook::Integer)
Delete a [`Webhook`](@ref).
"""
function delete_webhook(c::Client, webhook::Integer)
return Response(c, :DELETE, "/webhooks/$webhook")
end
"""
delete_webhook_with_token(c::Client, webhook::Integer, token::AbstractString)
Delete a [`Webhook`](@ref) with a token.
"""
function delete_webhook_with_token(c::Client, webhook::Integer, token::AbstractString)
return Response(c, :DELETE, "/webhooks/$webhook/$token")
end
"""
execute_webhook(
c::Client,
webhook::Integer,
token::AbstractString;
wait::Bool=false,
kwargs...,
) -> Message
Execute a [`Webhook`](@ref). If `wait` is not set, no [`Message`](@ref) is returned.
More details [here](https://discordapp.com/developers/docs/resources/webhook#execute-webhook).
"""
function execute_webhook(
c::Client,
webhook::Integer,
token::AbstractString;
wait::Bool=false,
kwargs...,
)
return Response{Message}(c, :POST, "/webhooks/$webhook/$token"; body=kwargs, wait=wait)
end
"""
execute_slack_compatible_webhook(
c::Client,
webhook::Integer,
token::AbstractString;
wait::Bool=true,
kwargs...,
)
Execute a Slack [`Webhook`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/webhook#execute-slackcompatible-webhook).
"""
function execute_slack_compatible_webhook(
c::Client,
webhook::Integer,
token::AbstractString;
wait::Bool=true,
kwargs...,
)
return Response{Message}(
c,
:POST,
"/webhooks/$webhook/$token/slack";
body=kwargs,
wait=wait,
)
end
"""
execute_github_compatible_webhook(
c::Client,
webhook::Integer,
token::AbstractString;
wait::Bool=true,
kwargs...,
)
Execute a Github [`Webhook`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/webhook#execute-githubcompatible-webhook).
"""
function execute_github_compatible_webhook(
c::Client,
webhook::Integer,
token::AbstractString;
wait::Bool=true,
kwargs...,
)
return Response{Message}(
c,
:POST,
"/webhooks/$webhook/$token/github";
body=kwargs,
wait=wait,
)
end
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 2416 | export Activity
"""
The start and stop times of an [`Activity`](@ref).
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-timestamps).
"""
struct ActivityTimestamps
start::Optional{DateTime}
stop::Optional{DateTime}
end
@boilerplate ActivityTimestamps :constructors :docs :lower :merge :mock
"""
Emoji for a custom [`Activity`](@ref).
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-emoji).
"""
struct ActivityEmoji <: DiscordObject
name::String
id::Optional{Snowflake}
animated::Optional{Bool}
end
@boilerplate ActivityEmoji :constructors :docs :lower :merge :mock
"""
The current party of an [`Activity`](@ref)'s player.
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-party).
"""
struct ActivityParty
id::Optional{String}
size::Optional{Vector{Int}}
end
@boilerplate ActivityParty :constructors :docs :lower :merge :mock
"""
Images and hover text for an [`Activity`](@ref).
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-assets).
"""
struct ActivityAssets
large_image::Optional{String}
large_text::Optional{String}
small_image::Optional{String}
small_text::Optional{String}
end
@boilerplate ActivityAssets :constructors :docs :lower :merge :mock
"""
Secrets for Rich Presence joining and spectating of an [`Activity`](@ref).
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-secrets).
"""
struct ActivitySecrets
join::Optional{String}
spectate::Optional{String}
match::Optional{String}
end
@boilerplate ActivitySecrets :constructors :docs :lower :merge :mock
"""
A [`User`](@ref) activity.
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object).
"""
struct Activity
name::String
type::Int
url::OptionalNullable{String}
timestamps::Optional{ActivityTimestamps}
application_id::Optional{Snowflake}
details::OptionalNullable{String}
state::OptionalNullable{String}
emoji::OptionalNullable{ActivityEmoji}
party::Optional{ActivityParty}
assets::Optional{ActivityAssets}
secrets::Optional{ActivitySecrets}
instance::Optional{Bool}
flags::Optional{Int}
end
@boilerplate Activity :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 382 | """
A [`Message`](@ref) attachment.
More details [here](https://discordapp.com/developers/docs/resources/channel#attachment-object).
"""
struct Attachment <: DiscordObject
id::Snowflake
filename::String
size::Int # file size — presumably in bytes; confirm against Discord docs
url::String
proxy_url::String # proxied URL served via Discord's media proxy
height::Optional{Int} # NOTE(review): likely only present for image attachments — confirm
width::Optional{Int}
end
@boilerplate Attachment :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 6139 | export AuditLog
# Maps an audit-log change `key` to a `(value type, owning entity type)` pair.
# Consumed by the `AuditLogChange` keyword constructor to pick a value converter.
const AUDIT_LOG_CHANGE_TYPES = Dict(
"name" => (String, Guild),
"icon_hash" => (String, Guild) ,
"splash_hash" => (String, Guild),
"owner_id" => (Snowflake, Guild),
"region" => (String, Guild),
"afk_channel_id" => (Snowflake, Guild),
"afk_timeout" => (Int, Guild),
"mfa_level" => (Int, Guild),
"verification_level" => (Int, Guild),
"explicit_content_filter" => (Int, Guild),
"default_message_notifications" => (Int, Guild),
"vanity_url_code" => (String, Guild),
"\$add" => (Vector{Role}, Guild),
"\$remove" => (Vector{Role}, Guild),
"prune_delete_days" => (Int, Guild),
"widget_enabled" => (Bool, Guild),
"widget_channel_id" => (Snowflake, Guild),
"position" => (Int, DiscordChannel),
"topic" => (String, DiscordChannel),
"bitrate" => (Int, DiscordChannel),
"permission_overwrites" => (Vector{Overwrite}, DiscordChannel),
"nsfw" => (Bool, DiscordChannel),
"application_id" => (Snowflake, DiscordChannel),
"permissions" => (String, Role),
"color" => (Int, Role),
"hoist" => (Bool, Role),
"mentionable" => (Bool, Role),
"allow" => (Int, Role),
"deny" => (Int, Role),
"code" => (String, Invite),
"channel_id" => (Snowflake, Invite),
"inviter_id" => (Snowflake, Invite),
"max_uses" => (Int, Invite),
"uses" => (Int, Invite),
"max_age" => (Int, Invite),
"temporary" => (Bool, Invite),
"deaf" => (Bool, User),
"mute" => (Bool, User),
"nick" => (String, User),
"avatar_hash" => (String, User),
"id" => (Snowflake, Any),
"type" => (Any, Any),
# Undocumented.
"rate_limit_per_user" => (Int, DiscordChannel),
)
"""
A change item in an [`AuditLogEntry`](@ref).
The first type parameter is the type of `new_value` and `old_value`. The second is the type
of the entity that `new_value` and `old_value` belong(ed) to.
More details [here](https://discordapp.com/developers/docs/resources/audit-log#audit-log-change-object).
"""
struct AuditLogChange{T, U}
new_value::Optional{T}
old_value::Optional{T}
key::String
type::Type{U}
end
@boilerplate AuditLogChange :docs :mock
AuditLogChange(d::Dict{Symbol, Any}) = AuditLogChange(; d...)
# Build an AuditLogChange from raw gateway keyword data, converting `new_value`
# and `old_value` to the Julia types registered in `AUDIT_LOG_CHANGE_TYPES` for
# this change's `key`. Unknown keys fall back to an untyped AuditLogChange{Any, Any}.
function AuditLogChange(; kwargs...)
    return if haskey(AUDIT_LOG_CHANGE_TYPES, kwargs[:key])
        T, U = AUDIT_LOG_CHANGE_TYPES[kwargs[:key]]
        # Select the converter for a single raw value: identity for Any,
        # `snowflake` for IDs, the element constructor for vector-typed keys
        # (broadcast per element below), and the type's constructor otherwise.
        func = if T === Any
            identity
        elseif T === Snowflake
            snowflake
        elseif T <: Vector
            eltype(T)
        else
            T
        end
        # Both values are optional in the payload; absent keys become `missing`.
        new_value = if haskey(kwargs, :new_value)
            if kwargs[:new_value] isa Vector
                func.(kwargs[:new_value])
            else
                func(kwargs[:new_value])
            end
        else
            missing
        end
        old_value = if haskey(kwargs, :old_value)
            if kwargs[:old_value] isa Vector
                func.(kwargs[:old_value])
            else
                func(kwargs[:old_value])
            end
        else
            missing
        end
        AuditLogChange{T, U}(new_value, old_value, kwargs[:key], U)
    else
        # Unregistered key: keep the raw values untouched.
        AuditLogChange{Any, Any}(
            get(kwargs, :new_value, missing),
            get(kwargs, :old_value, missing),
            kwargs[:key],
            Any,
        )
    end
end
"""
Optional information in an [`AuditLogEntry`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/audit-log#audit-log-entry-object-optional-audit-entry-info).
"""
struct AuditLogOptions <: DiscordObject
    delete_member_days::Optional{Int}
    members_removed::Optional{Int}
    channel_id::Optional{Snowflake}
    count::Optional{Int}
    id::Optional{Snowflake}
    type::Optional{Int}
    role_name::Optional{String}
end
@boilerplate AuditLogOptions :docs :merge :mock
AuditLogOptions(d::Dict{Symbol, Any}) = AuditLogOptions(; d...)
# Discord serializes the numeric option fields as strings, so this hand-written
# keyword constructor parses them instead of using the generated `@constructors` one.
function AuditLogOptions(; kwargs...)
    dmd = if haskey(kwargs, :delete_member_days)
        parse(Int, kwargs[:delete_member_days])
    else
        missing
    end
    return AuditLogOptions(
        dmd,
        haskey(kwargs, :members_removed) ? parse(Int, kwargs[:members_removed]) : missing,
        haskey(kwargs, :channel_id) ? snowflake(kwargs[:channel_id]) : missing,
        haskey(kwargs, :count) ? parse(Int, kwargs[:count]) : missing,
        haskey(kwargs, :id) ? snowflake(kwargs[:id]) : missing,
        # NOTE(review): wraps the raw value in `OverwriteType`, which is not
        # defined anywhere in this file, while the field is declared
        # Optional{Int} — confirm `OverwriteType` exists and converts to Int.
        haskey(kwargs, :type) ? OverwriteType(kwargs[:type]) : missing,
        get(kwargs, :role_name, missing),
    )
end
end
"""
An entry in an [`AuditLog`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/audit-log#audit-log-entry-object).
"""
struct AuditLogEntry
target_id::Nullable{Snowflake}
changes::Optional{Vector{AuditLogChange}}
user_id::Snowflake
id::Snowflake
action_type::Int
options::Optional{AuditLogOptions}
reason::Optional{String}
end
@boilerplate AuditLogEntry :constructors :docs :lower :merge :mock
"""
An audit log.
More details [here](https://discordapp.com/developers/docs/resources/audit-log#audit-log-object).
"""
struct AuditLog
webhooks::Vector{Webhook}
users::Vector{User}
audit_log_entries::Vector{AuditLogEntry}
end
@boilerplate AuditLog :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 246 | export Ban
"""
A [`User`](@ref) ban.
More details [here](https://discordapp.com/developers/docs/resources/guild#ban-object).
"""
struct Ban
reason::Nullable{String}
user::User
end
@boilerplate Ban :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 982 | export DiscordChannel
"""
A Discord channel.
More details [here](https://discordapp.com/developers/docs/resources/channel#channel-object).
Note: The name `Channel` is already used, hence the prefix.
"""
struct DiscordChannel <: DiscordObject
id::Snowflake
type::Optional{Int}
guild_id::Optional{Snowflake}
position::Optional{Int}
permission_overwrites::Optional{Vector{Overwrite}}
name::Optional{String}
topic::OptionalNullable{String}
nsfw::Optional{Bool}
last_message_id::OptionalNullable{Snowflake}
bitrate::Optional{Int}
user_limit::Optional{Int}
rate_limit_per_user::Optional{Int}
recipients::Optional{Vector{User}}
icon::OptionalNullable{String}
owner_id::Optional{Snowflake}
application_id::Optional{Snowflake}
parent_id::OptionalNullable{Snowflake}
last_pin_timestamp::OptionalNullable{DateTime} # Not supposed to be nullable.
end
@boilerplate DiscordChannel :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 369 | """
A [`User`](@ref) connection to an external service (Twitch, YouTube, etc.).
More details [here](https://discordapp.com/developers/docs/resources/user#connection-object).
"""
struct Connection
id::String
name::String
type::String
revoked::Bool
integrations::Vector{Integration}
end
@boilerplate Connection :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 3190 | export EmbedThumbnail,
EmbedVideo,
EmbedImage,
EmbedProvider,
EmbedAuthor,
EmbedFooter,
EmbedField,
Embed
"""
An [`Embed`](@ref)'s thumbnail image information.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-thumbnail-structure).
"""
struct EmbedThumbnail
url::Optional{String}
proxy_url::Optional{String}
height::Optional{Int}
width::Optional{Int}
end
@boilerplate EmbedThumbnail :constructors :docs :lower :merge :mock
"""
An [`Embed`](@ref)'s video information.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-video-structure).
"""
struct EmbedVideo
url::Optional{String}
height::Optional{Int}
width::Optional{Int}
end
@boilerplate EmbedVideo :constructors :docs :lower :merge :mock
"""
An [`Embed`](@ref)'s image information.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-image-structure).
"""
struct EmbedImage
url::Optional{String}
proxy_url::Optional{String}
height::Optional{Int}
width::Optional{Int}
end
@boilerplate EmbedImage :constructors :docs :lower :merge :mock
"""
An [`Embed`](@ref)'s provider information.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-provider-structure).
"""
struct EmbedProvider
name::Optional{String}
url::OptionalNullable{String} # Not supposed to be nullable.
end
@boilerplate EmbedProvider :constructors :docs :lower :merge :mock
"""
An [`Embed`](@ref)'s author information.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-author-structure).
"""
struct EmbedAuthor
name::Optional{String}
url::Optional{String}
icon_url::Optional{String}
proxy_icon_url::Optional{String}
end
@boilerplate EmbedAuthor :constructors :docs :lower :merge :mock
"""
An [`Embed`](@ref)'s footer information.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-footer-structure).
"""
struct EmbedFooter
text::String
icon_url::Optional{String}
proxy_icon_url::Optional{String}
end
@boilerplate EmbedFooter :constructors :docs :lower :merge :mock
"""
An [`Embed`](@ref) field.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object-embed-field-structure).
"""
struct EmbedField
name::String
value::String
inline::Optional{Bool}
end
@boilerplate EmbedField :constructors :docs :lower :merge :mock
"""
A [`Message`](@ref) embed.
More details [here](https://discordapp.com/developers/docs/resources/channel#embed-object).
"""
struct Embed
title::Optional{String}
type::Optional{String}
description::Optional{String}
url::Optional{String}
timestamp::Optional{DateTime}
color::Optional{Int}
footer::Optional{EmbedFooter}
image::Optional{EmbedImage}
thumbnail::Optional{EmbedThumbnail}
video::Optional{EmbedVideo}
provider::Optional{EmbedProvider}
author::Optional{EmbedAuthor}
fields::Optional{Vector{EmbedField}}
end
@boilerplate Embed :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 416 | export Emoji
"""
An emoji.
More details [here](https://discordapp.com/developers/docs/resources/emoji#emoji-object).
"""
struct Emoji <: DiscordObject
id::Nullable{Snowflake}
name::String
roles::Optional{Vector{Snowflake}}
user::Optional{User}
require_colons::Optional{Bool}
managed::Optional{Bool}
animated::Optional{Bool}
end
@boilerplate Emoji :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 576 | struct NamingError <: Exception
invalid_name::AbstractString
name_of::String
reason::String
end
struct FieldRequired <: Exception
field::String
structname::String
end
NamingError(a::AbstractString, nameof::String) = NamingError(a, nameof, "it may be too long or contain invalid characters.")
Base.showerror(io::IO, e::NamingError) = print(io, "Name $(e.invalid_name) is an invalid name for `$(e.name_of)` because `$(e.reason)`.")
Base.showerror(io::IO, e::FieldRequired) = print(io, "Field `$(e.field)` is required to construct a `$(e.structname)`.")
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 2724 | export Guild
"""
A Discord guild (server).
Can either be an [`UnavailableGuild`](@ref) or a [`Guild`](@ref).
"""
abstract type AbstractGuild <: DiscordObject end
function AbstractGuild(; kwargs...)
return if get(kwargs, :unavailable, length(kwargs) <= 2) === true
UnavailableGuild(; kwargs...)
else
Guild(; kwargs...)
end
end
AbstractGuild(d::Dict{Symbol, Any}) = AbstractGuild(; d...)
mock(::Type{AbstractGuild}) = mock(rand(Bool) ? UnavailableGuild : Guild)
"""
An unavailable Discord guild (server).
More details [here](https://discordapp.com/developers/docs/resources/guild#unavailable-guild-object).
"""
struct UnavailableGuild <: AbstractGuild
id::Snowflake
unavailable::Optional{Bool}
end
@boilerplate UnavailableGuild :constructors :docs :lower :merge :mock
"""
A Discord guild (server).
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-object).
"""
struct Guild <: AbstractGuild
id::Snowflake
name::String
icon::Nullable{String}
splash::OptionalNullable{String}
owner::Optional{Bool}
owner_id::Optional{Snowflake} # Missing in Invite.
permissions::Optional{String}
region::Optional{String} # Invite
afk_channel_id::OptionalNullable{Snowflake} # Invite
afk_timeout::Optional{Int} # Invite
embed_enabled::Optional{Bool}
embed_channel_id::OptionalNullable{Snowflake} # Not supposed to be nullable.
verification_level::Optional{Int}
default_message_notifications::Optional{Int} # Invite
explicit_content_filter::Optional{Int} # Invite
roles::Optional{Vector{Role}} # Invite
emojis::Optional{Vector{Emoji}} # Invite
features::Optional{Vector{String}}
mfa_level::Optional{Int} # Invite
application_id::OptionalNullable{Snowflake} # Invite
widget_enabled::Optional{Bool}
widget_channel_id::OptionalNullable{Snowflake} # Not supposed to be nullable.
system_channel_id::OptionalNullable{Snowflake} # Invite
joined_at::Optional{DateTime}
large::Optional{Bool}
unavailable::Optional{Bool}
member_count::Optional{Int}
max_members::Optional{Int}
voice_states::Optional{Vector{VoiceState}}
members::Optional{Vector{Member}}
channels::Optional{Vector{DiscordChannel}}
presences::Optional{Vector{Presence}}
max_presences::OptionalNullable{Int}
vanity_url_code::OptionalNullable{String}
description::OptionalNullable{String}
banner::OptionalNullable{String} # Hash
djl_users::Optional{Set{Snowflake}}
djl_channels::Optional{Set{Snowflake}}
end
@boilerplate Guild :constructors :docs :lower :merge :mock
Base.merge(x::UnavailableGuild, y::Guild) = y
Base.merge(x::Guild, y::UnavailableGuild) = x
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 288 | export GuildEmbed
"""
A [`Guild`](@ref) embed.
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-embed-object).
"""
struct GuildEmbed
enabled::Bool
channel_id::Nullable{Snowflake}
end
@boilerplate GuildEmbed :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 5015 | export Handler,
Context
const EVENT_TYPES = Dict{String, Symbol}(
"READY" => :Ready,
"RESUMED" => :Resumed,
"CHANNEL_CREATE" => :ChannelCreate,
"CHANNEL_UPDATE" => :ChannelUpdate,
"CHANNEL_DELETE" => :ChannelDelete,
"CHANNEL_PINS_UPDATE" => :ChannelPinsUpdate,
"GUILD_CREATE" => :GuildCreate,
"GUILD_UPDATE" => :GuildUpdate,
"GUILD_DELETE" => :GuildDelete,
"GUILD_BAN_ADD" => :GuildBanAdd,
"GUILD_BAN_REMOVE" => :GuildBanRemove,
"GUILD_EMOJIS_UPDATE" => :GuildEmojisUpdate,
"GUILD_INTEGRATIONS_UPDATE" => :GuildIntegrationsUpdate,
"GUILD_MEMBER_ADD" => :GuildMemberAdd,
"GUILD_MEMBER_REMOVE" => :GuildMemberRemove,
"GUILD_MEMBER_UPDATE" => :GuildMemberUpdate,
"GUILD_MEMBERS_CHUNK" => :GuildMembersChunk,
"GUILD_ROLE_CREATE" => :GuildRoleCreate,
"GUILD_ROLE_UPDATE" => :GuildRoleUpdate,
"GUILD_ROLE_DELETE" => :GuildRoleDelete,
"MESSAGE_CREATE" => :MessageCreate,
"MESSAGE_UPDATE" => :MessageUpdate,
"MESSAGE_DELETE" => :MessageDelete,
"MESSAGE_DELETE_BULK" => :MessageDeleteBulk,
"MESSAGE_REACTION_ADD" => :MessageReactionAdd,
"MESSAGE_REACTION_REMOVE" => :MessageReactionRemove,
"MESSAGE_REACTION_REMOVE_ALL" => :MessageReactionRemoveAll,
"INTERACTION_CREATE" => :InteractionCreate,
"PRESENCE_UPDATE" => :PresenceUpdate,
"TYPING_START" => :TypingStart,
"USER_UPDATE" => :UserUpdate,
"VOICE_STATE_UPDATE" => :VoiceStateUpdate,
"VOICE_SERVER_UPDATE" => :VoiceServerUpdate,
"WEBHOOKS_UPDATE" => :WebhooksUpdate,
)
"""
Handler(
f::Function
d::Dict{Symbol, Any}
)
Handler is a wrapper for a `Dict{Symbol, Any}` that also contains a function.
"""
struct Handler
f::Function
d::Dict{Symbol, Any}
end
"""
Handler(; kwargs...) -> Handler
Generates a handler based on kwargs and a function.
"""
Handler(f; kwargs...) = Handler(f, Dict(kwargs))
"""
Context(
data::Dict{Symbol, Any}
)
Context is a wrapper for a `Dict{Symbol, Any}` with some special functionality.
"""
struct Context
data::Dict{Symbol, Any}
end
"""
Context(; kwargs...) -> Context
Generates a context based on kwargs.
"""
Context(; kwargs...) = Context(Dict(kwargs))
quickdoc(name::String, ar::String) = "
$(name)(
f::Function
c::Client
)
Adds a handler for the $(replace(ar, "_"=>"\\_")) gateway event.
The `f` parameter's signature should be:
```
(ctx::Context) -> Any
```
"
# Fallback: wrap raw event data without any event-specific processing.
context(data::Dict{Symbol, Any}) = Context(data)
"""
    context(t::Symbol, data::Dict{Symbol, Any}) -> Context
Checks if the Context needs to be created in a special way based on the event provided by `t`.\n
Then, returns the generated context.
"""
function context(t::Symbol, data::Dict{Symbol, Any})
    t == :OnMessageCreate && return Context(; message=Message(data))
    t ∈ [:OnGuildCreate, :OnGuildUpdate] && return Context(; guild=Guild(data))
    # Reaction events only ship raw IDs, so build lightweight channel/message stubs.
    t ∈ [:OnMessageReactionAdd, :OnMessageReactionRemove] && return Context(;
        emoji=make(Emoji, data, :emoji),
        channel=DiscordChannel(; id=data[:channel_id]),
        message=Message(; id=data[:message_id], channel_id=data[:channel_id]),
        data...)
    t == :OnReady && return Context(; user=make(User, data, :user), data...)
    t == :OnInteractionCreate && return Context(; interaction=Interaction(data))
    Context(data)
end
# Pop key `k` out of `data` and wrap it in `T` (mutates `data`).
make(::Type{T}, data::Dict{Symbol, Any}, k::Symbol) where T <: DiscordObject = T(pop!(data, k, missing))
# For every gateway event, generate:
#   * an `OnX(f; kwargs...)` Handler factory, and
#   * an exported `on_x!(f, c; kwargs...)` convenience that builds the handler
#     and registers it on the client via `add_handler!`.
for (event_name, event_type) in EVENT_TYPES
    nm = Symbol("On" * String(event_type))
    hm = Symbol("on_" * lowercase(event_name) * "!")
    @eval ($nm)(f::Function; kwargs...) = Handler(f; type=Symbol($nm), kwargs...)
    ex = quote
        @doc quickdoc(string($hm), $event_name)
        # Fix: forward `kwargs` as keyword arguments. The generated `OnX`
        # factory only accepts keywords, so the previous positional splat
        # `($nm)(f, kwargs...)` raised a MethodError on every call.
        ($hm)(f::Function, c; kwargs...) = add_handler!(c, ($nm)(f; kwargs...))
        export $hm
    end
    eval(ex)
end
# Deprecated
# ----------------------------------------
# | Removed: 1.0+ |
# | Added 0.3 |
# | Replaced by on_message_create! |
# ----------------------------------------
const on_message! = on_message_create!
const on_reaction_add! = on_message_reaction_add!
const on_reaction_remove! = on_message_reaction_remove!
# Let `ctx.foo` / `h.foo` read keys of the wrapped Dict directly.
Base.getproperty(ctx::Context, sym::Symbol) = getfield(ctx, :data)[sym]
Base.hasproperty(ctx::Context, sym::Symbol) = haskey(getfield(ctx, :data), sym)
Base.getproperty(h::Handler, sym::Symbol) = sym != :f ? getfield(h, :d)[sym] : getfield(h, sym)
Base.hasproperty(h::Handler, sym::Symbol) = haskey(getfield(h, :d), sym)
# The event type (e.g. :OnMessageCreate) this handler was registered for.
handlerkind(h::Handler) = h.type
# Argument list of `f`'s first method; `method_args` is defined elsewhere in the package.
handlerargs(f::Function) = method_args(first(methods(f)))
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 778 | export Integration
"""
An [`Integration`](@ref) account.
More details [here](https://discordapp.com/developers/docs/resources/guild#integration-account-object).
"""
struct IntegrationAccount
id::String
name::String
end
@boilerplate IntegrationAccount :constructors :docs :lower :merge :mock
"""
A [`Guild`](@ref) integration.
More details [here](https://discordapp.com/developers/docs/resources/guild#integration-object).
"""
struct Integration <: DiscordObject
id::Snowflake
name::String
type::String
enabled::Bool
syncing::Bool
role_id::Snowflake
expire_behaviour::Int
expire_grace_period::Int
user::User
account::IntegrationAccount
synced_at::DateTime
end
@boilerplate Integration :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 4762 | export Interaction,
InteractionData,
ApplicationCommand,
ApplicationCommandOption,
ApplicationCommandChoice,
Component,
SelectOption,
ResolvedData
"""
Resolved [`User`](@ref)s, [`Member`](@ref)s, [`Role`](@ref)s,
[`DiscordChannel`](@ref)s, and [`Message`](@ref)s referenced by an interaction,
keyed by their IDs.
More details [here](https://discord.com/developers/docs/interactions/receiving-and-responding#interaction-object-resolved-data-structure).
"""
struct ResolvedData
    users::Optional{Dict{Snowflake, User}}
    members::Optional{Dict{Snowflake, Member}}
    roles::Optional{Dict{Snowflake, Role}}
    # Fix: was `Channel`, which resolves to Base's task channel type; resolved
    # channels are Discord channels (the package prefixes the type precisely
    # because the name `Channel` is taken — see DiscordChannel's docstring).
    channels::Optional{Dict{Snowflake, DiscordChannel}}
    messages::Optional{Dict{Snowflake, Message}}
end
@boilerplate ResolvedData :constructors :lower :merge
"""
Application Command Choice.
More details [here](https://discord.com/developers/docs/interactions/application-commands#application-command-object-application-command-option-choice-structure).
"""
struct ApplicationCommandChoice
name::String
value::Union{String, Number}
end
@boilerplate ApplicationCommandChoice :constructors :docs :lower :merge
# TODO: Custom type gen for `value` field of ApplicationCommandOption.
"""
Application Command Option.
More details [here](https://discord.com/developers/docs/interactions/application-commands#application-command-object-application-command-option-structure).
"""
struct ApplicationCommandOption
value::Any
type::Optional{Int}
name::Optional{String}
description::Optional{String}
required::Optional{Bool}
min_value::Optional{Number}
max_value::Optional{Number}
autocomplete::Optional{Bool}
choices::Optional{Vector{ApplicationCommandChoice}}
options::Optional{Vector{ApplicationCommandOption}}
channel_types::Optional{Vector{Int}}
focused::Optional{Bool}
end
@boilerplate ApplicationCommandOption :constructors :docs :lower :merge
"""
An Application Command.
More details [here](https://discord.com/developers/docs/interactions/application-commands#application-commands).
"""
struct ApplicationCommand <: DiscordObject
id::OptionalNullable{Snowflake}
type::Optional{Int}
application_id::Snowflake
guild_id::Optional{Snowflake}
name::String
description::String
options::Optional{Vector{ApplicationCommandOption}}
default_permissions::Optional{Bool}
version::Optional{Snowflake}
end
@boilerplate ApplicationCommand :constructors :docs :lower :merge :mock
"""
A select option.
More details [here](https://discord.com/developers/docs/interactions/message-components#select-menu-object-select-option-structure).
"""
struct SelectOption <: DiscordObject
label::String
value::String
description::Optional{String}
emoji::Optional{Emoji}
default::Optional{Bool}
end
@boilerplate SelectOption :constructors :docs :lower :merge :mock
# Positional convenience constructor: up to three trailing arguments map onto
# the optional `description`, `emoji`, and `default` fields, in that order.
function SelectOption(label, value, args...)
    if isempty(args)
        return SelectOption(; label=label, value=value)
    end
    # Fix: the old comprehension iterated `0:length(args)` and indexed
    # `args[x + 1]`, reading one element past the end of `args` and raising a
    # BoundsError whenever any optional argument was supplied.
    optional_keys = (:description, :emoji, :default)
    extras = Dict(optional_keys[i] => args[i] for i in 1:length(args))
    return SelectOption(; label=label, value=value, extras...)
end
"""
An interactable component.
More details [here](https://discord.com/developers/docs/interactions/message-components).
"""
struct Component <: DiscordObject
type::Int
custom_id::Optional{String}
value::Optional{String}
disabled::Optional{Bool}
style::Optional{Int}
label::Optional{String}
emoji::Optional{Emoji}
url::Optional{String}
options::Optional{Vector{SelectOption}}
placeholder::Optional{String}
min_values::Optional{Int}
max_values::Optional{Int}
components::Optional{Vector{Component}}
end
@boilerplate Component :constructors :docs :lower :merge :mock
"""
Data for an interaction.
More details [here](https://discord.com/developers/docs/interactions/receiving-and-responding#interaction-object-interaction-data-structure).
"""
struct InteractionData <: DiscordObject
id::OptionalNullable{Snowflake}
name::OptionalNullable{String}
type::OptionalNullable{Int}
resolved::Optional{ResolvedData}
options::Optional{Vector{ApplicationCommandOption}}
custom_id::OptionalNullable{String}
component_type::OptionalNullable{Int}
components::OptionalNullable{Vector{Component}}
values::Optional{Vector{String}}
target_id::Optional{Snowflake}
end
@boilerplate InteractionData :constructors :docs :lower :merge
"""
An interaction.
More details [here](https://discord.com/developers/docs/interactions/receiving-and-responding#interaction-object-interaction-structure).
"""
struct Interaction <: DiscordObject
id::Nullable{Snowflake}
application_id::Nullable{Snowflake}
type::Int
data::OptionalNullable{InteractionData}
guild_id::Optional{Snowflake}
channel_id::Optional{Snowflake}
member::Optional{Member}
user::Optional{User}
token::String
version::Optional{Int}
message::Optional{Message}
end
@boilerplate Interaction :constructors :docs :lower :merge :mock | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 387 | export Invite
"""
An invite to a [`Guild`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/invite#invite-object).
"""
struct Invite
code::String
guild::Optional{Guild}
channel::DiscordChannel
approximate_presence_cound::Optional{Int}
approximate_member_count::Optional{Int}
end
@boilerplate Invite :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 367 | """
Metadata for an [`Invite`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/invite#invite-metadata-object).
"""
struct InviteMetadata
inviter::User
uses::Int
max_uses::Int
max_age::Int
temporary::Bool
created_at::DateTime
revoked::Bool
end
@boilerplate InviteMetadata :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 491 | export Member
"""
A [`Guild`](@ref) member.
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-member-object).
"""
mutable struct Member <: DiscordObject
user::Optional{User}
nick::OptionalNullable{String} # Not supposed to be nullable.
roles::Vector{Snowflake}
joined_at::DateTime
premium_since::OptionalNullable{DateTime}
deaf::Optional{Bool}
mute::Optional{Bool}
end
@boilerplate Member :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 2059 | export Message
"""
A [`Message`](@ref) activity.
More details [here](https://discordapp.com/developers/docs/resources/channel#message-object-message-activity-structure).
"""
struct MessageActivity
type::Int
party_id::Optional{String}
end
@boilerplate MessageActivity :constructors :docs :lower :merge :mock
"""
A Rich Presence [`Message`](@ref)'s application information.
More details [here](https://discordapp.com/developers/docs/resources/channel#message-object-message-application-structure).
"""
struct MessageApplication <: DiscordObject
id::Snowflake
cover_image::Optional{String}
description::String
icon::String
name::String
end
@boilerplate MessageApplication :constructors :docs :lower :merge :mock
struct MessageReference <: DiscordObject
message_id::Snowflake
channel_id::Optional{Snowflake}
guild_id::Optional{Snowflake}
fail_if_not_exists::Optional{Bool}
end
@boilerplate MessageReference :constructors :lower :merge
"""
A message sent to a [`DiscordChannel`](@ref).
More details [here](https://discordapp.com/developers/docs/resources/channel#message-object).
"""
struct Message <: DiscordObject
id::Snowflake
channel_id::Snowflake
# MessageUpdate only requires the ID and channel ID.
guild_id::Optional{Snowflake}
author::Optional{User}
member::Optional{Member}
content::Optional{String}
timestamp::Optional{DateTime}
edited_timestamp::OptionalNullable{DateTime}
tts::Optional{Bool}
mention_everyone::Optional{Bool}
mentions::Optional{Vector{User}}
mention_roles::Optional{Vector{Snowflake}}
attachments::Optional{Vector{Attachment}}
message_reference::Optional{MessageReference}
embeds::Optional{Vector{Embed}}
reactions::Optional{Vector{Reaction}}
nonce::OptionalNullable{Snowflake}
pinned::Optional{Bool}
webhook_id::Optional{Snowflake}
type::Optional{Int}
activity::Optional{MessageActivity}
application::Optional{MessageApplication}
end
@boilerplate Message :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 315 | export Overwrite
"""
A permission overwrite.
More details [here](https://discordapp.com/developers/docs/resources/channel#overwrite-object).
"""
struct Overwrite <: DiscordObject
id::Snowflake
type::Int
allow::String
deny::String
end
@boilerplate Overwrite :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 397 | export Presence
"""
A [`User`](@ref)'s presence.
More details [here](https://discordapp.com/developers/docs/topics/gateway#presence-update).
"""
struct Presence
user::User
roles::Optional{Vector{Snowflake}}
game::Nullable{Activity}
guild_id::Optional{Snowflake}
status::String
activities::Vector{Activity}
end
@boilerplate Presence :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 277 | export Reaction
"""
A [`Message`](@ref) reaction.
More details [here](https://discordapp.com/developers/docs/resources/channel#reaction-object).
"""
struct Reaction
count::Int
me::Bool
emoji::Emoji
end
@boilerplate Reaction :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 485 | export Role
"""
A [`User`](@ref) role.
More details [here](https://discordapp.com/developers/docs/topics/permissions#role-object).
"""
struct Role <: DiscordObject
id::Snowflake
name::String
color::Optional{Int} # These fields are missing in audit log entries.
hoist::Optional{Bool}
position::Optional{Int}
permissions::Optional{String}
managed::Optional{Bool}
mentionable::Optional{Bool}
end
@boilerplate Role :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 8351 | # First millisecond of 2015.
# Unix milliseconds at 2015-01-01T00:00:00 — the epoch Discord snowflakes count from.
const DISCORD_EPOCH = 1420070400000
# Discord IDs ("snowflakes") are 64-bit unsigned integers.
const Snowflake = UInt64
# Root of the Discord data-model type hierarchy.
abstract type DiscordObject end
# Coerce a value to a Snowflake where possible; unparseable values pass through unchanged.
snowflake(x::Integer) = Snowflake(x)
snowflake(x::AbstractString) = parse(Snowflake, x)
function snowflake(x::Symbol)
    parsed = tryparse(Snowflake, string(x))
    return parsed === nothing ? x : parsed
end
snowflake(x::Any) = x
# Bit-field accessors for a snowflake's timestamp/worker/process/increment components.
function snowflake2datetime(id::Snowflake)
    ms_since_epoch = (id >> 22) + DISCORD_EPOCH
    return unix2datetime(ms_since_epoch / 1000)
end
worker_id(id::Snowflake) = (id >> 17) & 0x1f
process_id(id::Snowflake) = (id >> 12) & 0x1f
increment(id::Snowflake) = id & 0xfff
# Discord timestamps arrive either as Unix milliseconds or as ISO-8601 strings.
datetime(ms::Int64) = unix2datetime(ms / 1000)
function datetime(iso::AbstractString)
    # Insert a ".000" milliseconds component before the UTC offset, then parse
    # the leading "yyyy-mm-ddTHH:MM:SS.sss" portion.
    padded = replace(iso, "+" => ".000+")
    return DateTime(padded[1:23], ISODateTimeFormat)
end
datetime(d::DateTime) = d
# `@lower` is an intentional no-op kept so existing `@boilerplate ... :lower`
# flags stay valid; serialization is handled by the StructTypes definitions
# emitted from `@constructors`.
macro lower(T)
    # do nothing
    quote
    end
end
# Define Base.merge for a type: `merge(a, b)` keeps `b`'s fields, falling back
# to `a`'s wherever `b`'s are `missing`. Also treats a `missing` operand as an
# identity element.
macro merge(T)
    quote
        function Base.merge(a::$T, b::$T)
            vals = map(fieldnames($T)) do f
                va = getfield(a, f)
                vb = getfield(b, f)
                ismissing(vb) ? va : vb
            end
            return $T(vals...)
        end
        Base.merge(::Missing, x::$T) = x
        Base.merge(x::$T, ::Missing) = x
    end
end
# Compute the expression needed to extract field k from keywords. Each method
# returns a quoted expression that `@constructors` splices into the generated
# keyword constructor; the expressions assume a `kwargs` Dict is in scope.
field(k::QuoteNode, ::Type{Snowflake}) = :(snowflake(kwargs[$k]))
field(k::QuoteNode, ::Type{DateTime}) = :(datetime(kwargs[$k]))
field(k::QuoteNode, ::Type{T}) where T = :($T(kwargs[$k]))
field(k::QuoteNode, ::Type{Vector{Snowflake}}) = :(snowflake.(kwargs[$k]))
field(k::QuoteNode, ::Type{Vector{DateTime}}) = :(datetime.(kwargs[$k]))
field(k::QuoteNode, ::Type{Vector{T}}) where T = :($T.(kwargs[$k]))
field(k::QuoteNode, ::Type{Any}) = :(haskey(kwargs, $k) ? kwargs[$k] : missing)
# NOTE(review): `T` below is NOT interpolated into the quoted expression, so
# the generated code refers to a literal name `T`, and no two-argument
# `snowify` method is visible in this file — confirm this method is ever hit.
field(k::QuoteNode, ::Type{Dict{Any, T}}) where T = :(snowify(T, kwargs[$k]))
# Enums: accept a raw integer, an already-constructed enum value, or anything
# the enum's constructor can handle.
function field(k::QuoteNode, ::Type{T}) where T <: Enum
    return :(kwargs[$k] isa Integer ? $T(Int(kwargs[$k])) :
        kwargs[$k] isa $T ? kwargs[$k] : $T(kwargs[$k]))
end
# Optional fields: absent keys become `missing`.
function field(k::QuoteNode, ::Type{Optional{T}}) where T
    return :(haskey(kwargs, $k) ? $(field(k, T)) : missing)
end
# Nullable fields: `nothing` passes through untouched.
function field(k::QuoteNode, ::Type{Nullable{T}}) where T
    return :(kwargs[$k] === nothing ? nothing : $(field(k, T)))
end
function field(k::QuoteNode, ::Type{OptionalNullable{T}}) where T
    return :(haskey(kwargs, $k) ? $(field(k, Nullable{T})) : missing)
end
# Convert every value of `d` to type `T`, preserving keys.
# The result is an untyped Dict{Any, Any}, matching callers' expectations.
cvs(::Type{T}, d::Dict) where T = Dict{Any, Any}(k => T(v) for (k, v) in d)
# Recursively convert the String keys of a JSON-style payload Dict to Symbols.
# Non-Dict values (and Dicts of other key types) pass through unchanged.
function symbolize(d::Dict{String, Any})
    return Dict{Symbol, Any}(Symbol(k) => symbolize(v) for (k, v) in d)
end
symbolize(x::Any) = x
# Replace numeric Symbol keys (how ID-keyed JSON maps arrive) with Snowflakes;
# non-numeric keys pass through `snowflake` unchanged.
snowify(d::Dict{Symbol, Any}) = Dict{Any, Any}(snowflake(k) => v for (k, v) in d)
# When every value is itself a Symbol-keyed Dict, snowify the values instead of
# the outer keys.
function snowify(d::Dict{Symbol, V}) where V <: Dict{Symbol, Any}
    return Dict{Any, Any}(k => snowify(v) for (k, v) in d)
end
snowify(x::Any) = x
# Define constructors from keyword arguments and a Dict for a type.
# NOTE: `eval(T)` at expansion time means the struct must already be defined
# when `@constructors` (via `@boilerplate`) is invoked.
macro constructors(T)
    TT = eval(T)
    # One extraction expression per field, produced by the `field` methods above.
    args = map(f -> field(QuoteNode(f), fieldtype(TT, f)), fieldnames(TT))
    quote
        function $(esc(T))(; kwargs...)
            kwargs = snowify(Dict(kwargs))
            $(esc(T))($(args...))
        end
        $(esc(T))(d::Dict{Symbol, Any}) = $(esc(T))(; d...)
        $(esc(T))(d::Dict{String, Any}) = $(esc(T))(symbolize(d))
        $(esc(T))(x::$(esc(T))) = x
        Base.convert(::Type{$(esc(T))}, d::Dict{Symbol, Any})= $(esc(T))(d)
        # Serialize instances as key/value pairs for StructTypes/JSON3.
        function StructTypes.keyvaluepairs(x::$(esc(T)))
            d = Dict{Symbol, Any}()
            for k = fieldnames(typeof(x))
                d[k] = getfield(x, k)
            end
            d
        end
        StructTypes.StructType(::Type{$(esc(T))}) = StructTypes.DictType()
    end
end
# Allow implicit conversion from raw Ints to an enum type.
macro convertenum(T)
    quote
        Base.convert(::Type{$(esc(T))}, x::Int) = $(esc(T))(x)
    end
end
# Export all instances of an enum.
macro exportenum(T)
    TT = eval(T)
    quote
        $(map(x -> :(export $(Symbol(x))), instances(TT))...)
    end
end
# Format a type for a docstring: substitute the package's type aliases and
# rewrite Array/Union forms into Vector/Optional/Nullable shorthand.
function doctype(s::String)
    for (from, to) in ("UInt64" => "Snowflake", string(Int) => "Int",
                       "Discord." => "", "Dates." => "")
        s = replace(s, from => to)
    end
    # Collapse the first `Array{T,1}` occurrence into `Vector{T}`.
    arr = match(r"Array{([^{}]+),1}", s)
    if arr !== nothing
        s = replace(s, arr.match => "Vector{$(arr.captures[1])}")
    end
    # Union shorthands; the three-way union must be tried before the two-way ones.
    for (pat, alias) in ((r"Union{Missing, Nothing, (.+)}", "OptionalNullable"),
                         (r"Union{Missing, (.+)}", "Optional"),
                         (r"Union{Nothing, (.+)}", "Nullable"))
        m = match(pat, s)
        m === nothing || return replace(s, m.match => "$alias{$(m.captures[1])}")
    end
    return s
end
# Update a type's docstring with field names and types.
# Appends a "## Fields" section (formatted via `doctype`) to whatever docstring
# the type already carries; internal `djl_`-prefixed fields are omitted.
macro fielddoc(T)
    TT = eval(T)
    fields = filter(n -> !startswith(string(n), "djl_"), collect(fieldnames(TT)))
    ns = collect(string.(fields))
    # Pad names so the types line up in a column.
    width = maximum(length, ns)
    map!(n -> rpad(n, width), ns, ns)
    ts = collect(map(f -> string(fieldtype(TT, f)), fields))
    map!(doctype, ts, ts)
    docs = join(map(t -> "$(t[1]) :: $(t[2])", zip(ns, ts)), "\n")
    quote
        doc = string(@doc $T)
        docstring = doc * "\n## Fields\n\n```\n" * $docs * "\n```\n"
        # Silence the "replacing docs" warning while re-attaching the docstring.
        Base.CoreLogging.with_logger(Base.CoreLogging.NullLogger()) do
            @doc docstring $T
        end
    end
end
# Produce a random string: 1-20 characters drawn from '0'..'z' (codes 48-122),
# with punctuation removed afterwards (so the result may be shorter or empty).
randstring() = String([c for c in [Char(rand(48:122)) for _ in 1:rand(1:20)] if !ispunct(c)])
# Produce a randomized value of a type.
# Used to generate test fixtures; kwargs are forwarded so composite mocks can
# pin specific fields by name.
mock(::Type{Bool}; kwargs...) = rand(Bool)
mock(::Type{DateTime}; kwargs...) = now()
mock(::Type{AbstractString}; kwargs...) = randstring()
mock(::Type{Dict{Symbol, Any}}; kwargs...) = Dict(:a => mock(String), :b => mock(Int))
mock(::Type{T}; kwargs...) where T <: AbstractString = T(randstring())
# abs: mocked integers (e.g. snowflakes) are kept non-negative.
mock(::Type{T}; kwargs...) where T <: Integer = abs(rand(T))
mock(::Type{T}; kwargs...) where T <: Enum = instances(T)[rand(1:length(instances(T)))]
mock(::Type{Vector{T}}; kwargs...) where T = map(i -> mock(T; kwargs...), 1:rand(1:10))
mock(::Type{Set{T}}; kwargs...) where T = Set(map(i -> mock(T; kwargs...), 1:rand(1:10)))
# Optional/Nullable wrappers always mock the wrapped type, never missing/nothing.
mock(::Type{Optional{T}}; kwargs...) where T = mock(T; kwargs...)
mock(::Type{Nullable{T}}; kwargs...) where T = mock(T; kwargs...)
mock(::Type{OptionalNullable{T}}; kwargs...) where T = mock(T; kwargs...)
# Define a mock method for a type: every field is mocked recursively unless a
# value for it is pinned via keyword argument.
macro mock(T)
    quote
        function $(esc(:mock))(::Type{$T}; kwargs...)
            names = fieldnames($(esc(T)))
            types = map(TT -> fieldtype($(esc(T)), TT), names)
            args = Vector{Any}(undef, length(names))
            for (i, (n, t)) in enumerate(zip(names, types))
                # A keyword matching the field's name overrides the generated value.
                args[i] = haskey(kwargs, n) ? kwargs[n] : mock(t; kwargs...)
            end
            return $(esc(T))(args...)
        end
    end
end
# Apply the above macros to a type.
# Usage: `@boilerplate MyType :constructors :docs :mock ...` — each symbol
# enables the corresponding helper macro for the type.
macro boilerplate(T, exs...)
    macros = map(e -> e.value, exs)
    quote
        @static if :constructors in $macros
            @constructors $T
        end
        @static if :docs in $macros
            @fielddoc $T
        end
        @static if :convertenum in $macros
            @convertenum $T
        end
        @static if :export in $macros
            @exportenum $T
        end
        @static if :lower in $macros
            @lower $T
        end
        @static if :merge in $macros
            @merge $T
        end
        @static if :mock in $macros
            @mock $T
        end
    end
end
# Type definition files, ordered so that each file only depends on types
# defined in files included before it (e.g. member.jl before voice_state.jl).
include("overwrite.jl")
include("role.jl")
include("guild_embed.jl")
include("attachment.jl")
include("voice_region.jl")
include("activity.jl")
include("embed.jl")
include("user.jl")
include("ban.jl")
include("integration.jl")
include("connection.jl")
include("emoji.jl")
include("reaction.jl")
include("presence.jl")
include("channel.jl")
include("webhook.jl")
include("invite_metadata.jl")
include("member.jl")
include("voice_state.jl")
include("message.jl")
include("guild.jl")
include("invite.jl")
include("audit_log.jl")
include("interaction.jl")
include("handlers.jl")
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 579 | export User
"""
A Discord user.
More details [here](https://discordapp.com/developers/docs/resources/user#user-object).
"""
struct User <: DiscordObject
    id::Snowflake
    # The User inside of a Presence only needs its ID set.
    # All remaining fields are therefore Optional/Nullable; see the linked
    # docs for their exact semantics.
    username::Optional{String}
    discriminator::Optional{String}
    avatar::OptionalNullable{String}
    bot::Optional{Bool}
    mfa_enabled::Optional{Bool}
    locale::Optional{String}
    verified::Optional{Bool}
    email::OptionalNullable{String} # Not supposed to be nullable.
end
@boilerplate User :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 362 | export VoiceRegion
"""
A region for a [`Guild`](@ref)'s voice server.
More details [here](https://discordapp.com/developers/docs/resources/voice#voice-region-object).
"""
struct VoiceRegion
    # Fields map one-to-one onto Discord's voice region object (see link above).
    id::String
    name::String
    vip::Bool
    optimal::Bool
    deprecated::Bool
    custom::Bool
end
@boilerplate VoiceRegion :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 468 | """
A [`User`](@ref)'s voice connection status.
More details [here](https://discordapp.com/developers/docs/resources/voice#voice-state-object).
"""
struct VoiceState
    guild_id::Optional{Snowflake}
    channel_id::Nullable{Snowflake}  # `nothing` presumably means disconnected — see linked docs.
    user_id::Snowflake
    member::Optional{Member}
    session_id::String
    deaf::Bool       # Server-deafened (contrast with self_deaf below).
    mute::Bool       # Server-muted.
    self_deaf::Bool
    self_mute::Bool
    suppress::Bool
end
@boilerplate VoiceState :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 429 | export Webhook
"""
A Webhook.
More details [here](https://discordapp.com/developers/docs/resources/webhook#webhook-object).
"""
struct Webhook
    # Fields map one-to-one onto Discord's webhook object (see link above).
    id::Snowflake
    guild_id::Optional{Snowflake}
    channel_id::Snowflake
    user::Optional{User}
    name::Nullable{String}
    avatar::Nullable{String}
    token::Optional{String} # Missing in audit log entries.
end
@boilerplate Webhook :constructors :docs :lower :merge :mock
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 5093 | """
An [`Activity`](@ref)'s type. Available values are `GAME`, `STREAMING`,
`LISTENING`, `WATCHING`, `CUSTOM`, and `COMPETING`.
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-types).
"""
module ActivityType
    # Raw integer values from the Discord API.
    const GAME=0
    const STREAMING=1
    const LISTENING=2
    const WATCHING=3
    const CUSTOM=4
    const COMPETING=5
end
"""
Flags which indicate what an [`Activity`](@ref) payload contains.
More details [here](https://discordapp.com/developers/docs/topics/gateway#activity-object-activity-flags).
"""
module ActivityFlags
    # Bit flags; combine with bitwise OR.
    const INSTANCE=1<<0
    const JOIN=1<<1
    const SPECTATE=1<<2
    const JOIN_REQUEST=1<<3
    const SYNC=1<<4
    const PLAY=1<<5
end
"""
[`AuditLog`](@ref) action types.
More details [here](https://discordapp.com/developers/docs/resources/audit-log#audit-log-entry-object-audit-log-events).
"""
module ActionType
    # Guild.
    const GUILD_UPDATE=1
    # Channels and permission overwrites.
    const CHANNEL_CREATE=10
    const CHANNEL_UPDATE=11
    const CHANNEL_DELETE=12
    const CHANNEL_OVERWRITE_CREATE=13
    const CHANNEL_OVERWRITE_UPDATE=14
    const CHANNEL_OVERWRITE_DELETE=15
    # Members.
    const MEMBER_KICK=20
    const MEMBER_PRUNE=21
    const MEMBER_BAN_ADD=22
    const MEMBER_BAN_REMOVE=23
    const MEMBER_UPDATE=24
    const MEMBER_ROLE_UPDATE=25
    # Roles.
    const ROLE_CREATE=30
    const ROLE_UPDATE=31
    const ROLE_DELETE=32
    # Invites.
    const INVITE_CREATE=40
    const INVITE_UPDATE=41
    const INVITE_DELETE=42
    # Webhooks.
    const WEBHOOK_CREATE=50
    const WEBHOOK_UPDATE=51
    const WEBHOOK_DELETE=52
    # Emojis.
    const EMOJI_CREATE=60
    const EMOJI_UPDATE=61
    const EMOJI_DELETE=62
    # Messages.
    const MESSAGE_DELETE=72
end
"""
A [`DiscordChannel`](@ref)'s type. See full list
at https://discord.com/developers/docs/resources/channel#channel-object-channel-types
"""
module ChannelTypes
    # Gaps in the numbering (7-9) mirror the linked documentation.
    const GUILD_TEXT = 0
    const DM = 1
    const GUILD_VOICE = 2
    const GROUP_DM = 3
    const GUILD_CATEGORY = 4
    const GUILD_NEWS = 5
    const GUILD_STORE = 6
    const GUILD_NEWS_THREAD = 10
    const GUILD_PUBLIC_THREAD = 11
    const GUILD_PRIVATE_THREAD = 12
    const GUILD_STAGE_VOICE = 13
end
"""
A [`Guild`](@ref)'s verification level.
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-object-verification-level).
"""
module VerificationLevel
    # Values increase with strictness; see the linked documentation.
    const NONE = 0
    const LOW = 1
    const MEDIUM = 2
    const HIGH = 3
    const VERY_HIGH = 4
end
"""
A [`Guild`](@ref)'s default message notification level.
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-object-default-message-notification-level).
"""
module MessageNotificationLevel
    # Raw API values.
    const ALL_MESSAGES = 0
    const ONLY_MENTIONS = 1
end
"""
A [`Guild`](@ref)'s explicit content filter level.
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-object-explicit-content-filter-level).
"""
module ExplicitContentFilterLevel
    # Raw API values.
    const DISABLED = 0
    const MEMBERS_WITHOUT_ROLES = 1
    const ALL_MEMBERS = 2
end
"""
A [`Guild`](@ref)'s MFA level.
More details [here](https://discordapp.com/developers/docs/resources/guild#guild-object-mfa-level).
"""
module MFALevel
    # Raw API values.
    const NONE = 0
    const ELEVATED = 1
end
"""
An [`Interaction`](@ref)'s type.
More details [here](https://discord.com/developers/docs/interactions/receiving-and-responding#interaction-object-interaction-type).
"""
module InteractionType
    const PING = 1
    const APPLICATIONCOMMAND = 2
    const MESSAGECOMPONENT = 3
end
"""
An application command's type.
More details [here](https://discord.com/developers/docs/interactions/application-commands#application-command-object-application-command-types).
"""
module ApplicationCommandType
    const CHATINPUT = 1
    # NOTE(review): Discord's docs name type 2 "USER" (user context menu);
    # confirm `UI` is an intentional local alias.
    const UI = 2
    const MESSAGE = 3
end
"""
A message component's type.
More details [here](https://discord.com/developers/docs/interactions/message-components#component-object-component-types).
"""
module ComponentType
    const ACTIONROW = 1
    const BUTTON = 2
    const SELECTMENU = 3
end
"""
An application command option's type.
More details [here](https://discord.com/developers/docs/interactions/application-commands#application-command-object-application-command-option-type).
"""
module OptionType
    const SUB_COMMAND = 1
    const SUB_COMMAND_GROUP = 2
    const STRING = 3
    const INTEGER = 4
    const BOOLEAN = 5
    const USER = 6
    const CHANNEL = 7
    const ROLE = 8
    const MENTIONABLE = 9
    const NUMBER = 10
end
"""
A [`Message`](@ref)'s type.
More details [here](https://discordapp.com/developers/docs/resources/channel#message-object-message-types).
"""
module MessageType
    # Raw API values; 13 is not defined here.
    const DEFAULT = 0
    const RECIPIENT_ADD = 1
    const RECIPIENT_REMOVE = 2
    const CALL = 3
    const CHANNEL_NAME_CHANGE = 4
    const CHANNEL_ICON_CHANGE = 5
    const CHANNEL_PINNED_MESSAGE = 6
    const GUILD_MEMBER_JOIN = 7
    const USER_PREMIUM_GUILD_SUBSCRIPTION = 8
    const USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_1 = 9
    const USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_2 = 10
    const USER_PREMIUM_GUILD_SUBSCRIPTION_TIER_3 = 11
    const CHANNEL_FOLLOW_ADD = 12
    const GUILD_DISCOVERY_DISQUALIFIED = 14
    const GUILD_DISCOVERY_REQUALIFIED = 15
    const GUILD_DISCOVERY_GRACE_PERIOD_INITIAL_WARNING = 16
    const GUILD_DISCOVERY_GRACE_PERIOD_FINAL_WARNING = 17
    const THREAD_CREATED = 18
    const REPLY = 19
    const CHAT_INPUT_COMMAND = 20
    const THREAD_STARTER_MESSAGE = 21
    const GUILD_INVITE_REMINDER = 22
    const CONTEXT_MENU_COMMAND = 23
end
"""
A [`Message`](@ref)'s activity type.
More details [here](https://discordapp.com/developers/docs/resources/channel#message-object-message-activity-types).
"""
module MessageActivityType
    # Raw API values; 4 is not defined here.
    const JOIN = 1
    const SPECTATE = 2
    const LISTEN = 3
    const JOIN_REQUEST = 5
end
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 21172 | export PERM_NONE,
PERM_ALL,
has_permission,
permissions_in,
reply,
modal,
intents,
mention,
split_message,
Options,
plaintext,
heartbeat_ping,
upload_file,
set_game,
opt,
Option,
extops,
isme,
component,
@fetch,
@fetchval,
@deferred_fetch,
@deferred_fetchval
# The four CRUD verbs wrapped by the @fetch/@deferred_fetch macros below.
const CRUD_FNS = :create, :retrieve, :update, :delete
"""
Regex expressions for [`split_message`](@ref) to not break Discord formatting.
"""
const STYLES = [
    # Code blocks, inline code, strikethrough, underline/italics, bold.
    r"```.+?```"s, r"`.+?`", r"~~.+?~~", r"(_|__).+?\1", r"(\*+).+?\1",
]
"""
Bitwise permission flags.
More details [here](https://discordapp.com/developers/docs/topics/permissions#permissions-bitwise-permission-flags).
"""
@enum Permission begin
    PERM_CREATE_INSTANT_INVITE=1<<0
    PERM_KICK_MEMBERS=1<<1
    PERM_BAN_MEMBERS=1<<2
    PERM_ADMINISTRATOR=1<<3
    PERM_MANAGE_CHANNELS=1<<4
    PERM_MANAGE_GUILD=1<<5
    PERM_ADD_REACTIONS=1<<6
    PERM_VIEW_AUDIT_LOG=1<<7
    PERM_VIEW_CHANNEL=1<<10
    PERM_SEND_MESSAGES=1<<11
    PERM_SEND_TTS_MESSAGES=1<<12
    PERM_MANAGE_MESSAGES=1<<13
    PERM_EMBED_LINKS=1<<14
    PERM_ATTACH_FILES=1<<15
    PERM_READ_MESSAGE_HISTORY=1<<16
    PERM_MENTION_EVERYONE=1<<17
    PERM_USE_EXTERNAL_EMOJIS=1<<18
    PERM_CONNECT=1<<20
    PERM_SPEAK=1<<21
    PERM_MUTE_MEMBERS=1<<22
    PERM_DEAFEN_MEMBERS=1<<23
    PERM_MOVE_MEMBERS=1<<24
    PERM_USE_VAD=1<<25
    # Bits 8 and 9 are declared out of numeric order; only the bit values matter.
    PERM_PRIORITY_SPEAKER=1<<8
    PERM_STREAM=1<<9
    PERM_CHANGE_NICKNAME=1<<26
    PERM_MANAGE_NICKNAMES=1<<27
    PERM_MANAGE_ROLES=1<<28
    PERM_MANAGE_WEBHOOKS=1<<29
    PERM_MANAGE_EMOJIS=1<<30
end
@boilerplate Permission :export
# Gateway intents: bit flags declaring which event groups the bot subscribes
# to. Combine values with `intents(...)` when identifying with the gateway.
@enum Intent begin
    GUILDS = 1 << 0;
    GUILD_MEMBERS = 1 << 1;
    GUILD_BANS = 1 << 2;
    GUILD_EMOJIS = 1 << 3;
    GUILD_INTEGRATIONS = 1 << 4;
    GUILD_WEBHOOKS = 1 << 5;
    GUILD_INVITES = 1 << 6;
    GUILD_VOICE_STATES = 1 << 7;
    GUILD_PRESENCES = 1 << 8;
    GUILD_MESSAGES = 1 << 9;
    GUILD_MESSAGE_REACTIONS = 1 << 10;
    GUILD_MESSAGE_TYPING = 1 << 11;
    DIRECT_MESSAGES = 1 << 12;
    DIRECT_MESSAGE_REACTIONS = 1 << 13;
    DIRECT_MESSAGE_TYPING = 1 << 14;
end
@boilerplate Intent :export
intents(args...) = sum(Int.(args))
# No permissions at all.
const PERM_NONE = 0
# Every bit defined in `Permission`, OR'd together.
const PERM_ALL = |(Int.(instances(Permission))...)
"""
    has_permission(perms::Integer, perm::Permission) -> Bool

Determine whether a bitwise OR of permissions contains one [`Permission`](@ref).
An administrator bit implicitly grants every permission.

## Examples
```jldoctest; setup=:(using Ekztazy)
julia> has_permission(0x0420, PERM_VIEW_CHANNEL)
true

julia> has_permission(0x0420, PERM_ADMINISTRATOR)
false

julia> has_permission(0x0008, PERM_MANAGE_ROLES)
true
```
"""
function has_permission(perms::Integer, perm::Permission)
    admin_bit = Int64(PERM_ADMINISTRATOR)
    wanted = Int64(perm)
    return perms & admin_bit == admin_bit || perms & wanted == wanted
end
"""
    permissions_in(m::Member, g::Guild, ch::DiscordChannel) -> Int64

Compute a [`Member`](@ref)'s [`Permission`](@ref)s in a [`DiscordChannel`](@ref).
"""
function permissions_in(m::Member, g::Guild, ch::DiscordChannel)
    # The guild owner implicitly has every permission.
    !ismissing(m.user) && m.user.id == g.owner_id && return PERM_ALL
    # Get permissions for @everyone.
    idx = findfirst(r -> r.name == "@everyone", g.roles)
    everyone = idx === nothing ? nothing : g.roles[idx]
    perms = idx === nothing ? Int64(0) : everyone.permissions
    # Administrator short-circuits: channel overwrites cannot revoke it.
    perms & Int64(PERM_ADMINISTRATOR) == Int64(PERM_ADMINISTRATOR) && return PERM_ALL
    roles = idx === nothing ? m.roles : [everyone.id; m.roles]
    # Apply role overwrites.
    for role in roles
        idx = findfirst(
            o -> o.type === OT_ROLE && o.id == role,
            coalesce(ch.permission_overwrites, Overwrite[]),
        )
        if idx !== nothing
            o = ch.permission_overwrites[idx]
            # Denies are cleared first, then allows are set.
            perms &= ~o.deny
            perms |= o.allow
        end
    end
    # Apply user-specific overwrite (applied last, so it takes precedence).
    if !ismissing(m.user)
        idx = findfirst(
            o -> o.type === OT_MEMBER && o.id == m.user.id,
            coalesce(ch.permission_overwrites, Overwrite[]),
        )
        if idx !== nothing
            o = ch.permission_overwrites[idx]
            perms &= ~o.deny
            perms |= o.allow
        end
    end
    return perms
end
# ID of the user who triggered an interaction: guild interactions carry a
# Member, DM interactions carry a bare User.
getId(int::Interaction) = ismissing(int.member) ? int.user.id : int.member.user.id
# For a Context: the message author's ID if it wraps a message, otherwise the
# interaction's user ID.
getId(ctx::Context) = hasproperty(ctx, :message) ? ctx.message.author.id : getId(ctx.interaction)
# Printing a Discord entity produces its in-chat mention/markup string.
Base.print(io::IO, c::DiscordChannel) = print(io, "<#$(c.id)>")
Base.print(io::IO, r::Role) = print(io, "<@&$(r.id)>")
Base.print(io::IO, u::User) = print(io, "<@$(u.id)>")
function Base.print(io::IO, m::Member)
    if ismissing(m.user)
        # No user payload: fall back to the nickname or a placeholder.
        print(io, something(coalesce(m.nick, "<unknown member>"), "<unknown member>"))
    elseif ismissing(m.nick) || m.nick === nothing
        print(io, m.user)
    else
        # Nickname mention form.
        print(io, "<@!$(m.user.id)>")
    end
end
function Base.print(io::IO, e::Emoji)
    s = if e.id === nothing
        # No custom ID: a Unicode emoji.
        coalesce(e.require_colons, false) ? ":$(e.name):" : e.name
    else
        # Custom emoji; the "a" prefix marks animated ones.
        coalesce(e.animated, false) ? "<a:$(e.name):$(e.id)>" : "<:$(e.name):$(e.id)>"
    end
    print(io, s)
end
# Normalize message-creation keywords: every component that is not already an
# action row (type 1) gets wrapped in one — presumably because Discord only
# accepts action rows at the top level. Returns the kwargs as a Dict either way.
function compkwfix(; kwargs...)
    d = Dict(kwargs)
    if haskey(d, :components)
        d[:components] = Any[
            c.type == 1 ? c : Component(; type=1, components=[c]) for c in d[:components]
        ]
    end
    return d
end
"""
    reply(
        c::Client
        context;
        kwargs...
    )

Replies to a [`Context`](@ref), an [`Interaction`](@ref) or a [`Message`](@ref).
"""
# `noreply=true` sends a plain message instead of a threaded reply reference.
reply(c::Client, m::Message; noreply=false, kwargs...) = create_message(c, m.channel_id; message_reference=(noreply ? missing : MessageReference(message_id=m.id)), compkwfix(; kwargs...)...)
# `raw=true` bypasses the CRUD layer and calls the interaction endpoint directly.
reply(c::Client, int::Interaction; raw=false, kwargs...) = !raw ? create(c, Message, int; compkwfix(; kwargs...)...) : respond_to_interaction(c, int.id, int.token; kwargs...)
reply(c::Client, ctx::Context; kwargs...) = reply(c, (hasproperty(ctx, :message) ? ctx.message : ctx.interaction); compkwfix(; kwargs...)...)
"""
    mention(
        o::DiscordObject
    )

Generates the plaintext mention for a [`User`](@ref), a [`Member`](@ref), a [`DiscordChannel`](@ref), a [`Role`](@ref), or a [`Context`](@ref)
"""
mention(u::User) = "<@$(u.id)>"
mention(r::Role) = "<@&$(r.id)>"
mention(m::Member) = "<@$(m.user.id)>"
mention(c::DiscordChannel) = "<#$(c.id)>"
mention(m::Message) = mention(m.author)
# DM interactions have no member; fall back to the bare user.
mention(i::Interaction) = ismissing(i.member) ? mention(i.user) : mention(i.member)
# NOTE(review): returns `nothing` for a Context with neither field — confirm
# callers tolerate that.
mention(ctx::Context) = if hasproperty(ctx, :interaction) return mention(ctx.interaction) elseif hasproperty(ctx, :message) return mention(ctx.message) end
"""
    filter_ranges(u::Vector{UnitRange{Int}})

Filter a list of ranges, discarding ranges included in other ranges from the list.

# Example
```jldoctest; setup=:(using Ekztazy)
julia> Ekztazy.filter_ranges([1:5, 3:8, 1:20, 2:16, 10:70, 25:60, 5:35, 50:90, 10:70])
4-element Vector{UnitRange{Int64}}:
 1:20
 5:35
 50:90
 10:70
```
"""
function filter_ranges(u::Vector{UnitRange{Int}})
    keep = fill(true, length(u))
    for (i, r) in enumerate(u)
        # Drop `r` as soon as a distinct, still-kept range contains it.
        # Checking `keep[j]` as we go means that of two identical ranges only
        # the earlier one is discarded.
        for (j, s) in enumerate(u)
            if j != i && keep[j] && r ⊆ s
                keep[i] = false
                break
            end
        end
    end
    return u[keep]
end
"""
    split_message(text::AbstractString; chunk_limit::Int=2000,
                  extrastyles::Vector{Regex}=Vector{Regex}(),
                  forcesplit::Bool = true) -> Vector{String}

Split a message into chunks with at most chunk_limit length, preserving formatting.

The `chunk_limit` has as default the 2000 character limit of Discord's messages,
but can be changed to any nonnegative integer.

Formatting is specified by [`STYLES`](@ref) and can be aggregated
with the `extrastyles` argument.

Discord limits messages to 2000, so the code forces split if format breaking
cannot be avoided. If desired, however, this behavior can be lifted by setting
`forcesplit` to false.

## Examples
```julia
julia> split_message("foo")
1-element Vector{String}:
 "foo"

julia> split_message(repeat('.', 1995) * "**hello, world**")[2]
"**hello, world**"

julia> split_message("**hello**, *world*", chunk_limit=10)
2-element Vector{String}:
 "**hello**,"
 "*world*"

julia> split_message("**hello**, _*beautiful* world_", chunk_limit=15)
┌ Warning: message was forced-split to fit the desired chunk length limit 15
└ @ Main REPL[66]:28
3-element Vector{String}:
 "**hello**,"
 "_*beautiful* wo"
 "rld_"

julia> split_message("**hello**, _*beautiful* world_", chunk_limit=15, forcesplit=false)
┌ Warning: message could not be split into chunks smaller than the length limit 15
└ @ Main REPL[66]:32
2-element Vector{String}:
 "**hello**,"
 "_*beautiful* world_"

julia> split_message("**hello**\\n=====\\n", chunk_limit=12)
2-element Vector{String}:
 "**hello**\\n=="
 "==="

julia> split_message("**hello**\\n≡≡≡≡≡\\n", chunk_limit=12, extrastyles = [r"\\n≡+\\n"])
2-element Vector{String}:
 "**hello**"
 "≡≡≡≡≡"
```
"""
function split_message(text::AbstractString; chunk_limit::Int=2000,
                       extrastyles::Vector{Regex}=Vector{Regex}(),
                       forcesplit::Bool = true)
    chunks = String[]
    text = strip(text)
    while !isempty(text)
        # Remaining text fits in one chunk: done.
        if length(text) ≤ chunk_limit
            push!(chunks, strip(text))
            return chunks
        end
        # get ranges associated with the formattings
        # mranges = vcat(findall.(union(STYLES, extrastyles),Ref(text))...) can't use findall in julia 1.0 and 1.1 ...
        mranges = [m.offset:m.offset+ncodeunits(m.match)-1 for m in vcat(collect.(eachmatch.(union(STYLES, extrastyles), text))...)]
        # filter ranges to eliminate inner formattings
        franges = filter_ranges(mranges)
        # get ranges that get split apart by the chunk limit - there should be only one, unless text is ill-formatted
        splitranges = filter(r -> (length(text[1:r[1]]) ≤ chunk_limit) & (length(text[1:r[end]]) > chunk_limit), franges)
        if length(splitranges) > 0
            # Cut just before the first formatted span that would be broken.
            stop = minimum(map(r -> prevind(text, r[1]), splitranges))
        end
        if length(splitranges) == 0
            # get highest valid unicode index if no range is split apart
            stop = maximum(filter(n -> length(text[1:n])≤chunk_limit, thisind.(Ref(text), 1:ncodeunits(text))))
        elseif (stop == 0) && (forcesplit == true)
            # get highest valid unicode if format breaking cannot be avoided and forcesplit is true
            stop = maximum(filter(n -> length(text[1:n])≤chunk_limit, thisind.(Ref(text), 1:ncodeunits(text))))
            # @warn "message was forced-split to fit the desired chunk length limit $chunk_limit"
        elseif stop == 0
            # give up at this point if current chunk cannot be split and `forcesplit` is set to false
            push!(chunks, strip(text))
            # @warn "message could not be split into chunks smaller than the length limit $chunk_limit"
            return chunks
        end
        # splits preferably at a space-like character
        lastspace = findlast(isspace, text[1:stop])
        if lastspace !== nothing
            stop = lastspace
        end
        # push chunk and select remaining text
        push!(chunks, strip(text[1:stop]))
        text = strip(text[nextind(text, stop):end])
    end
    return chunks
end
"""
    plaintext(m::Message) -> String
    plaintext(c::Client, m::Message) -> String

Get the [`Message`](@ref) contents with any [`User`](@ref) mentions replaced with their
plaintext. If a [`Client`](@ref) is provided, [`DiscordChannel`](@ref)s [`Role`](@ref) are
also replaced. However, only channels and roles stored in state are replaced; no API
requests are made.
"""
function plaintext(m::Message)
    content = m.content
    for u in coalesce(m.mentions, User[])
        name = "@$(u.username)"
        # Both the plain and nickname mention forms resolve to the username.
        content = replace(content, "<@$(u.id)>" => name)
        content = replace(content, "<@!$(u.id)>" => name)
    end
    return content
end

function plaintext(c::Client, m::Message)
    content = m.content
    for u in coalesce(m.mentions, User[])
        member = get(c.state, Member; guild=m.guild_id, user=u.id)
        nick = if member !== nothing && member.nick isa String
            "@$(member.nick)"
        else
            "@$(u.username)"
        end
        # NOTE(review): the plain form uses the username while the "!" form
        # uses the nickname — confirm this asymmetry is intended.
        content = replace(content, "<@$(u.id)>" => "@$(u.username)")
        content = replace(content, "<@!$(u.id)>" => "@$nick")
    end
    guild = get(c.state, Guild; guild=m.guild_id)
    if guild !== nothing
        for r in coalesce(m.mention_roles, Snowflake[])
            role = get(c.state, Role; guild=m.guild_id, role=r)
            if role !== nothing
                content = replace(content, "<@&$r>" => "@$(role.name)")
            end
        end
        # Channel mentions are discovered from the content itself.
        for cap in unique(eachmatch(r"<#(\d+?)>", content))
            ch = get(c.state, DiscordChannel; channel=parse(Snowflake, first(cap.captures)))
            if ch !== nothing
                content = replace(content, cap.match => "#$(ch.name)")
            end
        end
    end
    return content
end
"""
    heartbeat_ping(c::Client) -> Nullable{Period}

Get the [`Client`](@ref)'s ping time to the gateway. If the client is not connected, or no
heartbeats have been sent/acknowledged, `nothing` is returned.
"""
function heartbeat_ping(c::Client)
    isopen(c) || return nothing
    never = DateTime(0)
    # A zero timestamp means the corresponding event has not happened yet.
    (c.last_hb == never || c.last_ack == never) && return nothing
    return c.last_ack - c.last_hb
end
"""
    upload_file(c::Client, ch::DiscordChannel, path::AbstractString; kwargs...) -> Message

Send a [`Message`](@ref) with a file [`Attachment`](@ref). Any keywords are passed on to
[`create_message`](@ref).
"""
function upload_file(c::Client, ch::DiscordChannel, path::AbstractString; kwargs...)
    # NOTE(review): the IO handle is handed off to create_message; confirm it
    # is closed downstream once the request completes.
    return create_message(c, ch.id; kwargs..., file=open(path))
end
"""
    set_game(
        c::Client,
        game::AbstractString;
        type::Int=ActivityType.GAME,
        since::Nullable{Int}=c.presence["since"],
        status::Union{Int, AbstractString}=c.presence["status"],
        afk::Bool=c.presence["afk"],
        kwargs...,
    ) -> Bool

Shortcut for [`update_status`](@ref) to set the [`Client`](@ref)'s [`Activity`](@ref). Any
additional keywords are passed into the `activity` section.
"""
function set_game(
    c::Client,
    game::AbstractString;
    # Fixed: the default previously referenced `ActionType.GAME`, which does
    # not exist (`ActionType` holds audit-log events), so calling `set_game`
    # without an explicit `type` threw an UndefVarError. Activity types live
    # in `ActivityType` (GAME = 0).
    type::Int=ActivityType.GAME,
    since::Nullable{Int}=c.presence["since"],
    status::Union{Int, AbstractString}=c.presence["status"],
    afk::Bool=c.presence["afk"],
    kwargs...,
)
    # Extra keywords are merged into the activity payload verbatim.
    activity = merge(Dict("name" => game, "type" => type), kwargs)
    return update_status(c, since, activity, status, afk)
end
"""
    @fetch [functions...] block

Wrap all calls to the specified CRUD functions ([`create`](@ref), [`retrieve`](@ref),
[`update`](@ref), and [`delete`](@ref)) with `fetch` inside a block. If no functions are
specified, all CRUD functions are wrapped.

## Examples
Wrapping all CRUD functions:
```julia
@fetch begin
    guild_resp = create(c, Guild; name="foo")
    guild_resp.ok || error("Request for new guild failed")
    channel_resp = retrieve(c, DiscordChannel, guild_resp.val)
end
```
Wrapping only calls to `retrieve`:
```julia
@fetch retrieve begin
    resp = retrieve(c, DiscordChannel, 123)
    future = create(c, Message, resp.val; content="foo") # Behaves normally.
end
```
"""
macro fetch(exs...)
    validate_fetch(exs...)
    # With no function names given, wrap all four CRUD functions.
    fns = length(exs) == 1 ? CRUD_FNS : exs[1:end-1]
    ex = wrapfn!(exs[end], fns, :fetch)
    quote
        $ex
    end
end
"""
    @fetchval [functions...] block

Identical to [`@fetch`](@ref), but calls are wrapped with [`fetchval`](@ref) instead.
"""
macro fetchval(exs...)
    validate_fetch(exs...)
    # With no function names given, wrap all four CRUD functions.
    fns = length(exs) == 1 ? CRUD_FNS : exs[1:end-1]
    ex = wrapfn!(exs[end], fns, :fetchval)
    quote
        $ex
    end
end
"""
    @deferred_fetch [functions...] block

Identical to [`@fetch`](@ref), but `Future`s are not `fetch`ed until the **end** of the
block. This is more efficient, but only works when there are no data dependencies in the
block.

## Examples
This will work:
```julia
@deferred_fetch begin
    guild_resp = create(c, Guild; name="foo")
    channel_resp = retrieve(c, DiscordChannel, 123)
end
```
This will not, because the second call is dependent on the first value:
```julia
@deferred_fetch begin
    guild_resp = create(c, Guild; name="foo")
    channels_resp = retrieve(c, DiscordChannel, guild_resp.val)
end
```
"""
macro deferred_fetch(exs...)
    validate_fetch(exs...)
    # With no function names given, defer all four CRUD functions.
    fns = length(exs) == 1 ? CRUD_FNS : exs[1:end-1]
    ex = deferfn!(exs[end], fns, :fetch)
    quote
        $ex
    end
end
"""
    @deferred_fetchval [functions...] block

Identical to [`@deferred_fetch`](@ref), but `Future`s have [`fetchval`](@ref) called on
them instead of `fetch`.
"""
macro deferred_fetchval(exs...)
    validate_fetch(exs...)
    # With no function names given, defer all four CRUD functions.
    fns = length(exs) == 1 ? CRUD_FNS : exs[1:end-1]
    ex = deferfn!(exs[end], fns, :fetchval)
    quote
        $ex
    end
end
# Validate the arguments to CRUD macros: the final argument must be a
# `begin ... end` block, and any names before it must be CRUD function names.
function validate_fetch(exs...)
    block = exs[end]
    if !(block isa Expr && block.head === :block)
        throw(ArgumentError("Final argument must be a block"))
    end
    foreach(exs[1:end-1]) do fn
        fn in CRUD_FNS || throw(ArgumentError("Only CRUD functions can be wrapped"))
    end
end
# Wrap calls to certain functions in a call to another function.
# Non-Expr leaves (symbols, literals) are escaped and returned unchanged.
wrapfn!(ex, ::Tuple, ::Symbol) = esc(ex)
function wrapfn!(ex::Expr, fns::Tuple, with::Symbol)
    if ex.head === :call && ex.args[1] in fns
        # Matched a CRUD call: wrap the whole escaped call in `with`.
        ex = :($(esc(with))($(esc(ex))))
    else
        # Recurse into sub-expressions in place.
        map!(arg -> wrapfn!(arg, fns, with), ex.args, ex.args)
    end
    return ex
end
# Defer fetching a Future until the end of a block.
# The 2-arg methods return the rewritten expression plus the renames
# (original symbol => gensym) that must be resolved after the block runs.
deferfn!(ex, ::Tuple) = (esc(ex), Pair{Symbol, Symbol}[])
function deferfn!(ex::Expr, fns::Tuple)
    renames = Pair{Symbol, Symbol}[]
    if ex.head === :(=) && ex.args[2] isa Expr && ex.args[2].args[1] in fns
        # `x = crud(...)`: bind the Future to a gensym for now.
        newsym = gensym(ex.args[1])
        push!(renames, ex.args[1] => newsym)
        ex.args[1] = newsym
        map!(esc, ex.args, ex.args)
    else
        for i in eachindex(ex.args)
            ex.args[i], rs = deferfn!(ex.args[i], fns)
            append!(renames, rs)
        end
    end
    return ex, renames
end
function deferfn!(ex, fns::Tuple, deferred::Symbol)
    ex, renames = deferfn!(ex, fns)
    # Append `x = fetch(gensym)` (or fetchval) statements to the block's end.
    repls = map(r -> :($(esc(r[1])) = $(esc(deferred))($(esc(r[2])))), renames)
    append!(ex.args, repls)
    return ex
end
"""
    Option(; kwargs...) -> ApplicationCommandOption

Helper function that creates an `ApplicationCommandOption`.
"""
Option(; kwargs...) = ApplicationCommandOption(; type=3, kwargs...)  # type 3 == STRING
Option(t::Type; kwargs...) = ApplicationCommandOption(; type=findtype(t), kwargs...)
Option(t::Type, name::String, description::String; kwargs...) = Option(t; name=name, description=description, kwargs...)
# "NULL" is the placeholder description used when none is supplied.
Option(t::Type, name::String; kwargs...) = Option(t, name, "NULL"; kwargs...)
Option(name::String, description::String; kwargs...) = ApplicationCommandOption(; type=3, name=name, description=description, kwargs...)
Option(name::String; kwargs...) = Option(name, "NULL"; kwargs...)
"""
    Options(args...) -> Vector{ApplicationCommandOption}

Calls [`Option`](@ref) on each Vector in the args,
e.g. `Options([Int, "n", "a number"], [String, "s"])`.
"""
Options(args...) = [Option(a...) for a in args]
"""
Deprecated, use [`Option`](@ref) instead
"""
opt(; kwargs...) = ApplicationCommandOption(; type=3, kwargs...)  # type 3 == STRING
opt(t::Type; kwargs...) = ApplicationCommandOption(type=findtype(t); kwargs...)
# Maps Julia types to Discord application-command option type codes
# (matching the values in `OptionType`).
const TYPEIND = Dict{Type, Int64}(
    String => 3,
    Int => 4,
    Bool => 5,
    User => 6,
    DiscordChannel => 7,
    Role => 8,
)
# Look up the option type code for `t`; throws `KeyError` for unsupported types.
function findtype(t::Type)
    return TYPEIND[t]
end
"""
    opt(ctx::Context)

Helper function that is equivalent to calling `extops(ctx.interaction.data.options)`
"""
function opt(ctx::Context)
    # Component interactions carry `components` instead of `options`; their
    # values are keyed by `custom_id`.
    if ismissing(ctx.interaction.data.components)
        extops(ctx.interaction.data.options)
    else
        extops(ctx.interaction.data.components, :custom_id)
    end
end
"""
    extops(ops::Vector)

Creates a Dict of `option name` -> `option value` for the given vector of [`ApplicationCommandOption`](@ref).
If the option is of `Subcommand` type, creates a dict for all its subcommands.
"""
extops(ops::Vector) = Dict([(op.name, Int(op.type) < 3 ? extops(op.options) : op.value) for op in ops])
# Component variants: flatten components one level out of their action rows,
# then key each by field `kf` with value from field `kv` (default `:value`).
extops(ops::Vector, kf::Symbol) = extops(ops, kf, :value)
extops(ops::Vector, kf::Symbol, kv::Symbol) = Dict([(getproperty(comp, kf), getproperty(comp, kv)) for comp in vcat([c.components for c in ops]...)])
"""
Return an empty `Dict` if the list of options used is missing.
"""
extops(::Missing) = Dict()
"""
    isme(c::Client, ctx::Context) -> Bool

Returns whether the context is a Message Context sent by the bot user.
"""
# Interaction contexts always return false; only message contexts are checked.
isme(c::Client, ctx::Context) = hasproperty(ctx, :message) ? (ctx.message.author.id == me(c).id) : false
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 2359 | using Ekztazy
using Distributed
# Manual integration script: exercises message events, slash commands,
# components, and select menus against a live test guild.
client = Client()
ENV["JULIA_DEBUG"] = Ekztazy
# Guild the commands are registered in; taken from the environment.
TESTGUILD = ENV["TESTGUILD"]
# Echo every non-bot message back at its author.
on_message_create!(client) do (ctx)
    (!isme(client, ctx)) && reply(client, ctx, content="$(mention(ctx.message.author)), $(ctx.message.content) TEST")
end
command!(client, TESTGUILD, "boom", "Go boom!") do (ctx)
    reply(client, ctx, content="$(mention(ctx)) blew up!")
end
command!(client, TESTGUILD, "bam", "Go bam!") do (ctx)
    reply(client, ctx, content="$(mention(ctx)) slapped themselves!")
end
# Non-legacy commands receive their declared options as handler arguments.
command!(client, TESTGUILD, "double", "Doubles a number!", legacy=false, options=[
    Option(Int, name="number", description="The number to double!")
]) do ctx, number
    reply(client, ctx, content="$(number*2)")
end
command!(client, TESTGUILD, "multiply", "Multiplies numbers!", legacy=false, options=Options(
    [Int, "a", "the first number"],
    [Int, "b", "the second number"]
)) do ctx, a::Int, b::Int
    reply(client, ctx, content="$(a*b)")
end
command!(client, TESTGUILD, "greet", "Greets a user", legacy=false, options=Options(
    [User, "u", "The user to greet"]
)) do ctx, u::Member
    reply(client, ctx, content="Hello, $(u)!")
end
# Demonstrates attaching a button component to a reply.
command!(client, TESTGUILD, "water", "Water a plant", legacy=false, options=[
    Option(Int, "howmuch", "How long do you want to water the plant?")
]) do ctx, howmuch
    cm = component!(client, "magic"; auto_ack=false, type=2, style=1, label="Wow, a Button!?") do context
        update(client, context, content="You pressed the button!")
    end
    reply(client, ctx, components=[cm], content="$(mention(ctx)) watered their plant for $(howmuch) hours. So much that the plant grew taller than them!")
end
# Demonstrates a select menu limited to one choice.
command!(client, TESTGUILD, "test", "Test something", legacy=false) do ctx
    cm = select!(
        client,
        "class",
        ("Rogue", "rogue"),
        ("Mage", "mage");
        max_values=1
    ) do context, choices
        reply(client, context, content="You chose $(choices[1])")
    end
    reply(client, ctx, components=[cm], content="What class you want noob?")
end
# Clean shutdown hook for ending the test run from Discord.
command!(client, TESTGUILD, "quit", "Ends the bot process!") do (ctx)
    reply(client, ctx, content="Shutting down the bot")
    close(client)
end
on_ready!(client) do (ctx)
    @info "Successfully logged in as $(ctx.user.username)" # obtain(client, User).username
end
start(client)
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | code | 49 | using Ekztazy
using Test
include("fulltest.jl") | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 2398 | <div align="center">
<img src="https://github.com/Humans-of-Julia/Ekztazy.jl/blob/master/docs/src/assets/logo.png?raw=true" width = "100" height = "100" style="align: center">
| Documentation | Build |
| -- | -- |
| [](https://humans-of-julia.github.io/Ekztazy.jl/stable/) [](https://humans-of-julia.github.io/Ekztazy.jl/dev/)| [](https://github.com/Humans-of-Julia/Ekztazy.jl/actions/workflows/ci.yml) |
</div>
Ekztazy.jl is the spiritual successor to [Discord.jl](https://github.com/Xh4H/Discord.jl). It is a maintained Julia Pkg for creating simple yet efficient [Discord](https://discord.com) bots.
* Strong, expressive type system: No fast-and-loose JSON objects here.
* Non-blocking: API calls return immediately and can be awaited when necessary.
* Simple: Multiple dispatch allows for a [small, elegant core API](https://Humans-of-Julia.github.io/Ekztazy.jl/stable/rest.html#CRUD-API-1).
* Fast: Julia is [fast like C but still easy like Python](https://julialang.org/blog/2012/02/why-we-created-julia).
* Robust: Resistant to bad event handlers and/or requests. Errors are introspectible for debugging.
* Lightweight: Cache what is important but shed dead weight with [TTL](https://en.wikipedia.org/wiki/Time_to_live).
* Gateway independent: Ability to interact with Discord's API without establishing a gateway connection.
* Distributed: [Process-based sharding](https://Humans-of-Julia.github.io/Ekztazy.jl/stable/client.html#Ekztazy.Client) requires next to no intervention and you can even run shards on separate machines.
Ekztazy.jl can be added like this:
```julia
] add Ekztazy
```
# Example
```julia
# Discord Token and Application ID should be saved in Env vars
client = Client()
# Guild to register the command in
TESTGUILD = ENV["TESTGUILD"]
command!(client, TESTGUILD, "double", "Doubles a number!", options=[opt(name="number", description="The number to double!")]) do (ctx)
Ekztazy.reply(client, ctx, content="$(parse(Int, opt(ctx)["number"])*2)")
end
on_ready!(client) do (ctx)
@info "Successfully logged in as $(ctx.user.username)"
end
start(client)
```
Many thanks to [@Xh4H](https://github.com/Xh4H) for Discord.jl, on which this package relied heavily.
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 339 | ```@meta
CurrentModule = Ekztazy
```
# Client
```@docs
Client
enable_cache!
disable_cache!
me
```
## Gateway
```@docs
Base.open
Base.isopen
Base.close
Base.wait
request_guild_members
update_voice_state
update_status
heartbeat_ping
start
```
## Caching
```@docs
CacheStrategy
CacheForever
CacheNever
CacheTTL
CacheLRU
CacheFilter
```
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 4243 | ```@meta
CurrentModule = Ekztazy
```
# Events
Events are how Discord communicates and interacts with our application.
In Ekztazy.jl, we use the [`Handler`](@ref) type in conjunction with the [`Context`](@ref) type to handle them.
## Handler
A Handler is simple to register for an event.
To register a handler for the Ready event:
```julia
# `c` is a client generated previously.
h = OnReady() do (ctx)
println("I'm ready!")
end
add_handler!(c, h)
```
There is also a convenience method to create handlers.
As such the following code is fully equivalent:
```julia
on_ready!(c) do (ctx)
println("I'm ready!")
end
```
(on_ready! creates an OnReady handler and adds it to the client.)
You can find a list of all gateway events [here](https://discord.com/developers/docs/topics/gateway#commands-and-events-gateway-events).
Simply add `On` before their NAME to get the name of the associated handler!
```@docs
Handler
```
## Context
The context is simply the payload received from the interaction. Exceptions:
- The `MessageCreate` context contains a [`Message`](@ref).
- The `OnGuildCreate` and `OnGuildUpdate` contexts contain a [`Guild`](@ref).
- The `OnInteractionCreate` context contains an [`Interaction`](@ref).
You can find the expected payloads for events [here](https://discord.com/developers/docs/topics/gateway#commands-and-events-gateway-events).
```@docs
Context
```
# Commands
Commands are the core of any Discord Application, and as such they are also the core of Ekztazy. Let us define the most basic command there can be.
```julia
# ... c is a client struct
# ... g is a guild id for testing
command!(ctx->reply(c, ctx, content="pong"), c, g, "ping", "Ping!")
```
For more information on each of the arguments, check the documentation of [`command!`](@ref) below. For more complicated functions, there are two ways for them to work. The default Discord.jl inspired "legacy style", which is deprecated, and the "new style". Let us look at both of these with two more complicated commands.
`New style`
```julia
# ... c is a client struct
# ... g is a guild id for testing
command!(c, g, "greet", "greets a user", legacy=false, options=Options(
[User, "u", "The user to greet"]
)) do ctx, u::Member
reply(client, ctx, content="Hello, $(u)!")
end
command!(c, g, "eval", "evaluates a string as julia code", options=Options(
[String, "str", "the string to evaluate"]
)) do ctx, str::String
@info "$str"
reply(c, ctx, content="```julia\n$(eval(Meta.parse(str)))\n```")
end
```
`Legacy style`
```julia
# ... c is a client struct
# ... g is a guild id for testing
command!(c, g, "greet", "greets a user", options=[
opt("u", "the user to greet")
]) do ctx
reply(client, ctx, content="Hello, <@$(opt(ctx)["u"])>!")
end
command!(c, g, "eval", "evaluates a string as julia code", options=[
opt("str", "the string to evaluate")
]) do ctx
reply(c, ctx, content="```julia\n$(eval(Meta.parse(opt(ctx)["str"])))\n```")
end
```
There are a few key differences. In the legacy style, we have to use the [`opt`](@ref) function to get a Dict of Option name => User input; in the new style these are automatically provided to the handler function and are automatically converted to the right type. In the legacy style, all command options are strings; in the new style they can be strings, ints, bools, [`User`](@ref)s, [`Role`](@ref)s and [`DiscordChannel`](@ref)s. You can check the examples to see many different command definitions.
## Handlers
```@docs
command!
component!
on_message_create!
on_guild_members_chunk!
on_channel_delete!
on_guild_integrations_update!
on_guild_member_update!
on_presence_update!
on_channel_create!
on_message_delete_bulk!
on_message_reaction_add!
on_guild_role_delete!
on_ready!
on_user_update!
on_guild_create!
on_guild_member_remove!
on_typing_start!
on_message_update!
on_guild_emojis_update!
on_interaction_create!
on_guild_delete!
on_voice_state_update!
on_guild_member_add!
on_guild_ban_remove!
on_guild_role_update!
on_guild_role_create!
on_voice_server_update!
on_guild_ban_add!
on_message_reaction_remove_all!
on_channel_pins_update!
on_resumed!
on_guild_update!
on_message_delete!
on_webhooks_update!
on_channel_update!
on_message_reaction_remove!
``` | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 291 | ```@meta
CurrentModule = Ekztazy
```
# Helpers
```@docs
STYLES
Permission
has_permission
permissions_in
reply
filter_ranges
split_message
plaintext
upload_file
set_game
opt
Option
extops
isme
method_args
add_handler!
Options
mention
@fetch
@fetchval
@deferred_fetch
@deferred_fetchval
```
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 5789 | ```@meta
CurrentModule = Ekztazy
```
## Index
### Introduction
Welcome to Ekztazy.jl
Ekztazy.jl is the spiritual successor to Discord.jl. It is a maintained Julia Pkg for creating simple yet efficient Discord bots.
- Strong, expressive type system: No fast-and-loose JSON objects here.
- Non-blocking: API calls return immediately and can be awaited when necessary.
- Simple: Multiple dispatch allows for a small, elegant core API.
- Fast: Julia is fast like C but still easy like Python.
- Robust: Resistant to bad event handlers and/or requests. Errors are introspectible for debugging.
- Lightweight: Cache what is important but shed dead weight with TTL.
- Gateway independent: Ability to interact with Discord's API without establishing a gateway connection.
- Distributed: Process-based sharding requires next to no intervention and you can even run shards on separate machines.
### Getting Started
You can add Ekztazy.jl from Git using the following command in the REPL:
```julia
] add https://github.com/Humans-of-Julia/Ekztazy.jl
```
The most important type when working with Ekztazy.jl is the [`Client`](@ref).
Most applications will start in a similar fashion to this:
```julia
using Ekztazy
client = Client()
```
This will create a [`Client`](@ref) using default parameters. This expects two environment variables:
- APPLICATION_ID, the bot's application id
- DISCORD_TOKEN, the bot's secret token
These can also be specified.
```julia
using Ekztazy
client = Client(
discord_token,
application_id,
intents(GUILDS, GUILD_MESSAGES)
)
```
(Assuming discord\_token is a String and application\_id is an Int).
For a more complete list of parameters for creating a [`Client`](@ref). Check the Client documentation.
Usually when working with Ekztazy, we will either want to handle messages or commands. Let's start with messages.
```julia
# ...
on_message_create!(client) do (ctx)
if ctx.message.author.id != me(client).id
reply(client, ctx, content="I received the following message: $(ctx.message.content).")
end
end
start(client)
```
Let's look at this code. First we are using the [`on_message_create!`](@ref) function which generates a [`Handler`](@ref). (For more information on this, check the events documentation). Then in the handling function we start by checking that the message author's id isn't the same as the bot's. This is sensible, as we wouldn't want the bot to indefinitely respond to itself. Finally, we use the [`reply`](@ref) function to reply to the message! Under the hood, the reply function appraises the context and finds a way to reply to it; the `kwargs` passed to it are then made into the request body. Here, in the message we use interpolation to send the message's content. We finish by calling [`start`](@ref) on the client.
Next, commands.
```julia
# ...
g = ENV["MY_TESTING_GUILD"]
command!(client, g, "double", "Doubles a number!", options=[opt(name="number", description="The number to double!")]) do (ctx)
Ekztazy.reply(client, ctx, content="$(parse(Int, opt(ctx)["number"])*2)")
end
start(client)
```
Let's look at this code again. First we are using the [`command!`](@ref) function. This creates a command with the specified parameters. We are also using the helper [`opt`](@ref) method, to generate and get options. Calling opt with a name and description will create an option, using it on a context will get the values the user provided for each option in a Dict. Like in the previous example we are using the magic [`reply`](@ref) function that creates a followup message for the interaction. (This does not strictly reply to the interaction. Interactions instantly get ACKd by Ekztazy.jl to prevent your handling implementation from exceeding the interaction's 3s reply time limit.)
Here is the equivalent using the new system. The old system is deprecated and will be removed in the next major version.
```julia
# ...
g = ENV["MY_TESTING_GUILD"]
command!(client, g, "double", "Doubles a number!", legacy=false, options=Options(
[Int, "num", "The number to double!"]
) do ctx, num::Int
reply(client, ctx, content="$(num*2)")
end
start(client)
```
The option `num` will magically be passed to the handler to be used directly; no need to use [`opt`](@ref) anywhere anymore. The value for `num` is also automatically converted to an Int.
Sometimes we may also want to do things without waiting for user input. However putting such code in the top scope would never be executed as [`start`](@ref) is blocking. This is where [`on_ready!`](@ref) comes in.
```julia
# ...
CHID = 776251117616234509 # Testing channel ID
on_ready!(client) do (ctx)
button = component!(client, "ar00"; type=2, style=1, label="Really??") do (ctx)
Ekztazy.reply(client, ctx, content="Yes!")
end
create(client, Message, CHID, content="I am ready!", components=[button])
end
```
Here, we first are using the [`on_ready!`](@ref). This is called as soon as the bot is ready (see the events documentation). We are then creating a [`Component`](@ref) and a handler for it.
The component's handler simply replies with "Yes!", as usual using the [`reply`](@ref) function. Next we have a new function, [`create`](@ref). This simply sends a message to the specified channel id (see the REST API documentation for more info). Here the message content is simply "I am ready!". It also has a component.
The component we created previously is of type 2, it's a button. It cannot be sent directly and needs to be wrapped in an action row of type 1, however Ekztazy is very nice so it'll do that for you.
This is all you should need for most Discord bot projects! For any question please join the [Humans of Julia Discord](https://discord.gg/C5h9D4j), and look for me @Kyando#0001!
```@index
``` | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 4149 | ```@meta
CurrentModule = Ekztazy
```
# REST API
## Response
```@docs
Response
fetchval
```
## CRUD API
On top of functions for accessing individual endpoints such as [`get_channel_messages`](@ref), Ekztazy.jl also offers a unified API with just four functions.
Named after [the **CRUD** model](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete), they cover most of the Discord REST API and allow you to write concise, expressive code, and forget about the subtleties of endpoint naming.
The argument ordering convention is roughly as follows:
1. A [`Client`](@ref), always.
2. For cases when we don't yet have the entity to be manipulated (usually [`create`](@ref) and [`retrieve`](@ref)), the entity's type.
If we do have the entity ([`update`](@ref) and [`delete`](@ref)), the entity itself.
4. The remaining positional arguments supply whatever context is needed to specify the entity.
For example, sending a message requires a [`DiscordChannel`](@ref) parameter.
5. Keyword arguments follow (usually for [`create`](@ref) and [`update`](@ref)).
```@docs
create
retrieve
update
delete
obtain
```
The full list of types available to be manipulated is:
* [`AuditLog`](@ref)
* [`ApplicationCommand`](@ref)
* [`Ban`](@ref)
* [`DiscordChannel`](@ref)
* [`Emoji`](@ref)
* [`GuildEmbed`](@ref)
* [`Guild`](@ref)
* [`Integration`](@ref)
* [`Invite`](@ref)
* [`Member`](@ref)
* [`Message`](@ref)
* [`Overwrite`](@ref)
* [`Reaction`](@ref)
* [`Role`](@ref)
* [`User`](@ref)
* [`VoiceRegion`](@ref)
* [`Webhook`](@ref)
## Endpoints
Functions which wrap REST API endpoints are named and sorted according to the [Discord API documentation](https://discordapp.com/developers/docs/resources/audit-log).
When a function accepts keyword arguments, the docstring will include a link to the Discord documentation which indicates the expected keys and values.
Remember that the return types annotated below are not the actual return types, but the types of [`Response`](@ref) that the returned `Future`s will yield.
## Audit Log
```@docs
get_guild_audit_log
```
## Channel
```@docs
get_channel
modify_channel
delete_channel
get_channel_messages
get_channel_message
create_message
create_reaction
delete_own_reaction
delete_user_reaction
get_reactions
delete_all_reactions
edit_message
delete_message
bulk_delete_messages
edit_channel_permissions
get_channel_invites
create_channel_invite
delete_channel_permission
trigger_typing_indicator
get_pinned_messages
add_pinned_channel_message
delete_pinned_channel_message
```
## Emoji
```@docs
list_guild_emojis
get_guild_emoji
create_guild_emoji
modify_guild_emoji
delete_guild_emoji
```
## Guild
```@docs
create_guild
get_guild
modify_guild
delete_guild
get_guild_channels
create_guild_channel
modify_guild_channel_positions
get_guild_member
list_guild_members
add_guild_member
modify_guild_member
modify_current_user_nick
add_guild_member_role
remove_guild_member_role
remove_guild_member
get_guild_bans
get_guild_ban
create_guild_ban
remove_guild_ban
get_guild_roles
create_guild_role
modify_guild_role_positions
modify_guild_role
delete_guild_role
get_guild_prune_count
begin_guild_prune
get_guild_voice_regions
get_guild_invites
get_guild_integrations
create_guild_integration
modify_guild_integration
delete_guild_integration
sync_guild_integration
get_guild_embed
modify_guild_embed
get_vanity_url
get_guild_widget_image
```
## Invite
```@docs
get_invite
delete_invite
```
## User
```@docs
get_current_user
get_user
modify_current_user
get_current_user_guilds
leave_guild
create_dm
```
## Voice
```@docs
list_voice_regions
```
## Webhook
```@docs
create_webhook
get_channel_webhooks
get_guild_webhooks
get_webhook
get_webhook_with_token
modify_webhook
modify_webhook_with_token
delete_webhook
delete_webhook_with_token
execute_webhook
execute_slack_compatible_webhook
execute_github_compatible_webhook
```
## Interaction
```@docs
create_application_command
get_application_commands
respond_to_interaction
create_followup_message
ack_interaction
update_ack_interaction
update_message_int
create_followup_message
edit_interaction
bulk_overwrite_application_commands
```
| Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.8.0 | da36639322f6789ee33aa2d4c55186f6b2bccdd2 | docs | 812 | ```@meta
CurrentModule = Ekztazy
```
# Types
```@docs
Message
AbstractGuild
Guild
Activity
ActivityTimestamps
ActivityParty
ActivityAssets
ActivitySecrets
ActivityType
ActivityFlags
ApplicationCommand
ApplicationCommandOption
ApplicationCommandChoice
Attachment
AuditLog
AuditLogEntry
AuditLogChange
AuditLogOptions
ActionType
Ban
DiscordChannel
Connection
Component
Embed
EmbedThumbnail
EmbedVideo
EmbedImage
EmbedProvider
EmbedAuthor
EmbedFooter
EmbedField
Emoji
UnavailableGuild
VerificationLevel
MessageNotificationLevel
ExplicitContentFilterLevel
MFALevel
GuildEmbed
Integration
IntegrationAccount
Interaction
InteractionData
Invite
InviteMetadata
Member
MessageActivity
MessageApplication
MessageType
MessageActivityType
Overwrite
Presence
Reaction
Role
SelectOption
User
VoiceRegion
VoiceState
Webhook
``` | Ekztazy | https://github.com/Humans-of-Julia/Ekztazy.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 44542 | ### A Pluto.jl notebook ###
# v0.19.27
using Markdown
using InteractiveUtils
# ╔═╡ 9ee33338-6985-4745-ab74-7caeac73496e
begin
using MLJ
using Random
using DataFrames
using ModalDecisionTrees
using SoleModels
# Load an example time-series classification dataset as a tuple (DataFrame, Vector{String})
# NATOPS: a multivariate time-series classification benchmark.
# NOTE(review): `SoleData` is referenced but not `using`-ed here — presumably
# re-exported by SoleModels; confirm, otherwise add `using SoleData`.
X, y = SoleData.load_arff_dataset("NATOPS")
end
# ╔═╡ 65141ec2-4da9-11ee-2a0a-8974a3ec37da
md"""
# ModalDecisionTrees.jl Demo
"""
# ╔═╡ dea67564-e905-4a49-9717-ca8071a4d489
md"""
## Learning and inspecting a Modal Decision Tree
"""
# ╔═╡ 99b1da9b-517a-4a84-8d0c-26e9f9ff6e15
# Instantiate the modal extension of CART, that uses relations from the coarser Interval Algebra "IA7".
# `downsize = (15,)` shrinks each series to 15 points before learning — TODO confirm exact semantics.
model = ModalDecisionTree(; relations = :IA7, downsize = (15,))
# ╔═╡ a00c4abe-5534-4c42-9464-d5a6867b9802
begin
# Randomly split the data: 20% training, 80% testing
# (a fixed MersenneTwister seed makes the split reproducible across runs)
N = nrow(X)
perm = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = perm[1:round(Int, N*.2)], perm[round(Int, N*.2)+1:end]
# Bind model and data into an MLJ machine.
mach = machine(model, X, y)
# Train.
@time fit!(mach; rows=train_idxs)
# Compute accuracy on the held-out test rows (the cell's displayed value).
yhat = predict_mode(mach, X[test_idxs,:])
MLJ.accuracy(yhat, y[test_idxs])
end
# ╔═╡ 7655458a-d5af-44d9-9ff6-74db67f3d5d4
# Print the fitted model (the full tree, via the report's `printmodel` closure).
report(mach).printmodel()
# ╔═╡ 66ec0d97-a221-4363-a21d-1c15835645d2
begin
# Print model in a condensed form (useful for checking which variables were useful)
report(mach).printmodel(false)
# Feature importances (the cell's displayed value).
feature_importances(mach)
end
# ╔═╡ b6d6ed79-9abc-47a6-bbbc-1a1610ffb830
begin
# Access the trained model object stored in the machine's report.
tree_train = report(mach).model
# Extract the corresponding ruleset (one rule per root-to-leaf path).
ruleset = listrules(tree_train);
# Print the ruleset with per-rule metrics and human-readable variable names.
printmodel.(ruleset; show_metrics = true, threshold_digits = 2, variable_names_map = [names(X)], parenthesize_atoms = false);
end
# ╔═╡ 00310a1c-c4f4-43bc-a60c-a6113434f242
begin
# "Sprinkle" the model with the test instances: route each test instance
# down the tree, yielding predictions and a tree annotated with test counts.
predictions, tree_test = report(mach).sprinkle(X[test_idxs,:], y[test_idxs]);
# Extract the ruleset from the sprinkled tree and print its test-set metrics.
ruleset_test = listrules(tree_test)
printmodel.(ruleset_test; show_metrics = true, threshold_digits = 2, variable_names_map = [names(X)]);
end
# ╔═╡ b3417b90-c910-407a-939b-5190602c5899
begin
# In the classification scenario, rules for the same class can be joined via logical disjunction (∨)
joined_ruleset_test = joinrules(ruleset_test)
printmodel.(joined_ruleset_test; show_metrics = true, variable_names_map = [names(X)], threshold_digits = 3);
end
# ╔═╡ 3cdf0a35-edd0-4a02-9410-94e828d0f519
begin
# Cross-validated accuracy estimate: 3-fold stratified CV with shuffling.
# (Refits the machine on each fold; the displayed value is the CV report.)
evaluate!(mach,
resampling=StratifiedCV(; nfolds = 3, shuffle=true),
measures=[accuracy],
verbosity=0,
check_measure=false
)
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7"
ModalDecisionTrees = "e54bda2e-c571-11ec-9d64-0242ac120002"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SoleModels = "4249d9c7-3290-4ddd-961c-e1d3ec2467f8"
[compat]
DataFrames = "~1.6.1"
MLJ = "~0.19.5"
ModalDecisionTrees = "~0.1.7"
SoleModels = "~0.4.0"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.0"
manifest_format = "2.0"
project_hash = "0bd92b94e833523a0cbe33d6155901e25a2ba45f"
[[deps.ARFFFiles]]
deps = ["CategoricalArrays", "Dates", "Parsers", "Tables"]
git-tree-sha1 = "e8c8e0a2be6eb4f56b1672e46004463033daa409"
uuid = "da404889-ca92-49ff-9e8b-0aa6b4d38dc8"
version = "1.4.1"
[[deps.AbstractTrees]]
git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.4.4"
[[deps.Adapt]]
deps = ["LinearAlgebra", "Requires"]
git-tree-sha1 = "76289dc51920fdc6e0013c872ba9551d54961c24"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.6.2"
weakdeps = ["StaticArrays"]
[deps.Adapt.extensions]
AdaptStaticArraysExt = "StaticArrays"
[[deps.ArgCheck]]
git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4"
uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197"
version = "2.3.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.ArnoldiMethod]]
deps = ["LinearAlgebra", "Random", "StaticArrays"]
git-tree-sha1 = "62e51b39331de8911e4a7ff6f5aaf38a5f4cc0ae"
uuid = "ec485272-7323-5ecc-a04f-4719b315124d"
version = "0.2.0"
[[deps.ArrayInterface]]
deps = ["Adapt", "LinearAlgebra", "Requires", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "f83ec24f76d4c8f525099b2ac475fc098138ec31"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "7.4.11"
[deps.ArrayInterface.extensions]
ArrayInterfaceBandedMatricesExt = "BandedMatrices"
ArrayInterfaceBlockBandedMatricesExt = "BlockBandedMatrices"
ArrayInterfaceCUDAExt = "CUDA"
ArrayInterfaceGPUArraysCoreExt = "GPUArraysCore"
ArrayInterfaceStaticArraysCoreExt = "StaticArraysCore"
ArrayInterfaceTrackerExt = "Tracker"
[deps.ArrayInterface.weakdeps]
BandedMatrices = "aae01518-5342-5314-be14-df237901396f"
BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.BangBang]]
deps = ["Compat", "ConstructionBase", "InitialValues", "LinearAlgebra", "Requires", "Setfield", "Tables"]
git-tree-sha1 = "e28912ce94077686443433c2800104b061a827ed"
uuid = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
version = "0.3.39"
[deps.BangBang.extensions]
BangBangChainRulesCoreExt = "ChainRulesCore"
BangBangDataFramesExt = "DataFrames"
BangBangStaticArraysExt = "StaticArrays"
BangBangStructArraysExt = "StructArrays"
BangBangTypedTablesExt = "TypedTables"
[deps.BangBang.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
StructArrays = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
TypedTables = "9d95f2ec-7b3d-5a63-8d20-e2491e220bb9"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Baselet]]
git-tree-sha1 = "aebf55e6d7795e02ca500a689d326ac979aaf89e"
uuid = "9718e550-a3fa-408a-8086-8db961cd8217"
version = "0.1.1"
[[deps.BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
git-tree-sha1 = "d9a9701b899b30332bbcb3e1679c41cce81fb0e8"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.3.2"
[[deps.BitFlags]]
git-tree-sha1 = "43b1a4a8f797c1cddadf60499a8a077d4af2cd2d"
uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35"
version = "0.1.7"
[[deps.CSV]]
deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "PrecompileTools", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings", "WorkerUtilities"]
git-tree-sha1 = "44dbf560808d49041989b8a96cae4cffbeb7966a"
uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
version = "0.10.11"
[[deps.Calculus]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f641eb0a4f00c343bbc32346e1217b86f3ce9dad"
uuid = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
version = "0.5.1"
[[deps.Catch22]]
deps = ["DelimitedFiles", "DimensionalData", "Libdl", "LinearAlgebra", "Pkg", "ProgressLogging", "Reexport", "Requires", "Statistics", "catch22_jll"]
git-tree-sha1 = "319c5c8e66fb45a3d5a91864fdebe01ec314b9dc"
uuid = "acdeb78f-3d39-4310-8fdf-6d75c17c6d5a"
version = "0.4.4"
[[deps.CategoricalArrays]]
deps = ["DataAPI", "Future", "Missings", "Printf", "Requires", "Statistics", "Unicode"]
git-tree-sha1 = "1568b28f91293458345dabba6a5ea3f183250a61"
uuid = "324d7699-5711-5eae-9e2f-1d82baa6b597"
version = "0.10.8"
[deps.CategoricalArrays.extensions]
CategoricalArraysJSONExt = "JSON"
CategoricalArraysRecipesBaseExt = "RecipesBase"
CategoricalArraysSentinelArraysExt = "SentinelArrays"
CategoricalArraysStructTypesExt = "StructTypes"
[deps.CategoricalArrays.weakdeps]
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
SentinelArrays = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
StructTypes = "856f2bd8-1eba-4b0a-8007-ebc267875bd4"
[[deps.CategoricalDistributions]]
deps = ["CategoricalArrays", "Distributions", "Missings", "OrderedCollections", "Random", "ScientificTypes"]
git-tree-sha1 = "ed760a4fde49997ff9360a780abe6e20175162aa"
uuid = "af321ab8-2d2e-40a6-b165-3d674595d28e"
version = "0.1.11"
[deps.CategoricalDistributions.extensions]
UnivariateFiniteDisplayExt = "UnicodePlots"
[deps.CategoricalDistributions.weakdeps]
UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
[[deps.CodeTracking]]
deps = ["InteractiveUtils", "UUIDs"]
git-tree-sha1 = "c0216e792f518b39b22212127d4a84dc31e4e386"
uuid = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
version = "1.3.5"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "02aa26a4cf76381be7f66e020a3eddeb27b0a092"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.2"
[[deps.ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "eb7f0f8307f71fac7c606984ea5fb2817275d6e4"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.4"
[[deps.Combinatorics]]
git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860"
uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa"
version = "1.0.2"
[[deps.Compat]]
deps = ["UUIDs"]
git-tree-sha1 = "8a62af3e248a8c4bad6b32cbbe663ae02275e32c"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "4.10.0"
weakdeps = ["Dates", "LinearAlgebra"]
[deps.Compat.extensions]
CompatLinearAlgebraExt = "LinearAlgebra"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "1.0.2+0"
[[deps.CompositionsBase]]
git-tree-sha1 = "802bb88cd69dfd1509f6670416bd4434015693ad"
uuid = "a33af91c-f02d-484b-be07-31d278c5ca2b"
version = "0.1.2"
[deps.CompositionsBase.extensions]
CompositionsBaseInverseFunctionsExt = "InverseFunctions"
[deps.CompositionsBase.weakdeps]
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.ComputationalResources]]
git-tree-sha1 = "52cb3ec90e8a8bea0e62e275ba577ad0f74821f7"
uuid = "ed09eef8-17a6-5b46-8889-db040fac31e3"
version = "0.3.2"
[[deps.ConcurrentUtilities]]
deps = ["Serialization", "Sockets"]
git-tree-sha1 = "5372dbbf8f0bdb8c700db5367132925c0771ef7e"
uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb"
version = "2.2.1"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "c53fc348ca4d40d7b371e71fd52251839080cbc9"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.5.4"
weakdeps = ["IntervalSets", "StaticArrays"]
[deps.ConstructionBase.extensions]
ConstructionBaseIntervalSetsExt = "IntervalSets"
ConstructionBaseStaticArraysExt = "StaticArrays"
[[deps.Crayons]]
git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.1.1"
[[deps.DataAPI]]
git-tree-sha1 = "8da84edb865b0b5b0100c0666a9bc9a0b71c553c"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.15.0"
[[deps.DataFrames]]
deps = ["Compat", "DataAPI", "DataStructures", "Future", "InlineStrings", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrecompileTools", "PrettyTables", "Printf", "REPL", "Random", "Reexport", "SentinelArrays", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
git-tree-sha1 = "04c738083f29f86e62c8afc341f0967d8717bdb8"
uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
version = "1.6.1"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "3dbd312d370723b6bb43ba9d02fc36abade4518d"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.15"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DefineSingletons]]
git-tree-sha1 = "0fba8b706d0178b4dc7fd44a96a92382c9065c2c"
uuid = "244e2a9f-e319-4986-a169-4d1fe445cd52"
version = "0.1.2"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
git-tree-sha1 = "9e2f36d3c96a820c678f2f1f1782582fcf685bae"
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
version = "1.9.1"
[[deps.Dictionaries]]
deps = ["Indexing", "Random", "Serialization"]
git-tree-sha1 = "e82c3c97b5b4ec111f3c1b55228cebc7510525a2"
uuid = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
version = "0.3.25"
[[deps.DimensionalData]]
deps = ["Adapt", "ArrayInterface", "ConstructionBase", "Dates", "Extents", "IntervalSets", "IteratorInterfaceExtensions", "LinearAlgebra", "PrecompileTools", "Random", "RecipesBase", "SparseArrays", "Statistics", "TableTraits", "Tables"]
git-tree-sha1 = "8a6e9c0ac3a861b983af862cefabc12519884a13"
uuid = "0703355e-b756-11e9-17c0-8b28908087d0"
version = "0.24.13"
[[deps.Distances]]
deps = ["LinearAlgebra", "Statistics", "StatsAPI"]
git-tree-sha1 = "5225c965635d8c21168e32a12954675e7bea1151"
uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
version = "0.10.10"
[deps.Distances.extensions]
DistancesChainRulesCoreExt = "ChainRulesCore"
DistancesSparseArraysExt = "SparseArrays"
[deps.Distances.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.Distributions]]
deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "Test"]
git-tree-sha1 = "3d5873f811f582873bb9871fc9c451784d5dc8c7"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.102"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
DistributionsDensityInterfaceExt = "DensityInterface"
[deps.Distributions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DensityInterface = "b429d917-457f-4dbc-8f4c-0cc954292b1d"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.3"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.DualNumbers]]
deps = ["Calculus", "NaNMath", "SpecialFunctions"]
git-tree-sha1 = "5837a837389fccf076445fce071c8ddaea35a566"
uuid = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
version = "0.6.8"
[[deps.EarlyStopping]]
deps = ["Dates", "Statistics"]
git-tree-sha1 = "98fdf08b707aaf69f524a6cd0a67858cefe0cfb6"
uuid = "792122b4-ca99-40de-a6bc-6742525f08b6"
version = "0.3.0"
[[deps.ExceptionUnwrapping]]
deps = ["Test"]
git-tree-sha1 = "e90caa41f5a86296e014e148ee061bd6c3edec96"
uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4"
version = "0.1.9"
[[deps.Extents]]
git-tree-sha1 = "5e1e4c53fa39afe63a7d356e30452249365fba99"
uuid = "411431e0-e8b7-467b-b5e0-f676ba4f2910"
version = "0.1.1"
[[deps.FilePathsBase]]
deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"]
git-tree-sha1 = "9f00e42f8d99fdde64d40c8ea5d14269a2e2c1aa"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.21"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
[[deps.FillArrays]]
deps = ["LinearAlgebra", "Random"]
git-tree-sha1 = "a20eaa3ad64254c61eeb5f230d9306e937405434"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "1.6.1"
weakdeps = ["SparseArrays", "Statistics"]
[deps.FillArrays.extensions]
FillArraysSparseArraysExt = "SparseArrays"
FillArraysStatisticsExt = "Statistics"
[[deps.FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[deps.FunctionWrappers]]
git-tree-sha1 = "d62485945ce5ae9c0c48f124a84998d755bae00e"
uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e"
version = "1.1.3"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.Graphs]]
deps = ["ArnoldiMethod", "Compat", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"]
git-tree-sha1 = "899050ace26649433ef1af25bc17a815b3db52b7"
uuid = "86223c79-3864-5bf0-83f7-82e725a168b6"
version = "1.9.0"
[[deps.HTTP]]
deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"]
git-tree-sha1 = "5eab648309e2e060198b45820af1a37182de3cce"
uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
version = "1.10.0"
[[deps.HypergeometricFunctions]]
deps = ["DualNumbers", "LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "f218fe3736ddf977e0e772bc9a586b2383da2685"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.23"
[[deps.Indexing]]
git-tree-sha1 = "ce1566720fd6b19ff3411404d4b977acd4814f9f"
uuid = "313cdc1a-70c2-5d6a-ae34-0150d3930a38"
version = "1.1.1"
[[deps.Inflate]]
git-tree-sha1 = "ea8031dea4aff6bd41f1df8f2fdfb25b33626381"
uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9"
version = "0.1.4"
[[deps.InitialValues]]
git-tree-sha1 = "4da0f88e9a39111c2fa3add390ab15f3a44f3ca3"
uuid = "22cec73e-a1b8-11e9-2c92-598750a2cf9c"
version = "0.3.1"
[[deps.InlineStrings]]
deps = ["Parsers"]
git-tree-sha1 = "9cc2baf75c6d09f9da536ddf58eb2f29dedaf461"
uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
version = "1.4.0"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.IntervalSets]]
deps = ["Dates", "Random"]
git-tree-sha1 = "8e59ea773deee525c99a8018409f64f19fb719e6"
uuid = "8197267c-284f-5f27-9208-e0e47529a953"
version = "0.7.7"
weakdeps = ["Statistics"]
[deps.IntervalSets.extensions]
IntervalSetsStatisticsExt = "Statistics"
[[deps.InvertedIndices]]
git-tree-sha1 = "0dc7b50b8d436461be01300fd8cd45aa0274b038"
uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.3.0"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
[[deps.IterTools]]
git-tree-sha1 = "4ced6667f9974fc5c5943fa5e2ef1ca43ea9e450"
uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
version = "1.8.0"
[[deps.IterationControl]]
deps = ["EarlyStopping", "InteractiveUtils"]
git-tree-sha1 = "d7df9a6fdd82a8cfdfe93a94fcce35515be634da"
uuid = "b3c1a2ee-3fec-4384-bf48-272ea71de57c"
version = "0.5.3"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Artifacts", "Preferences"]
git-tree-sha1 = "7e5d6779a1e09a36db2a7b6cff50942a0a7d0fca"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.5.0"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.4"
[[deps.JuliaInterpreter]]
deps = ["CodeTracking", "InteractiveUtils", "Random", "UUIDs"]
git-tree-sha1 = "81dc6aefcbe7421bd62cb6ca0e700779330acff8"
uuid = "aa1ae85d-cabe-5617-a682-6adf51b2e16a"
version = "0.9.25"
[[deps.LaTeXStrings]]
git-tree-sha1 = "f2355693d6778a178ade15952b7ac47a4ff97996"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.3.0"
[[deps.LatinHypercubeSampling]]
deps = ["Random", "StableRNGs", "StatsBase", "Test"]
git-tree-sha1 = "825289d43c753c7f1bf9bed334c253e9913997f8"
uuid = "a5e1c1ea-c99a-51d3-a14d-a9a37257b02d"
version = "1.9.0"
[[deps.Lazy]]
deps = ["MacroTools"]
git-tree-sha1 = "1370f8202dac30758f3c345f9909b97f53d87d3f"
uuid = "50d2b5c4-7a5e-59d5-8109-a42b560f39c0"
version = "0.15.1"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "7d6dd4e9212aebaeed356de34ccf262a3cd415aa"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.26"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables"
LogExpFunctionsInverseFunctionsExt = "InverseFunctions"
[deps.LogExpFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.LoggingExtras]]
deps = ["Dates", "Logging"]
git-tree-sha1 = "c1dd6d7978c12545b4179fb6153b9250c96b0075"
uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36"
version = "1.0.3"
[[deps.LossFunctions]]
deps = ["Markdown", "Requires", "Statistics"]
git-tree-sha1 = "df9da07efb9b05ca7ef701acec891ee8f73c99e2"
uuid = "30fc2ffe-d236-52d8-8643-a9d8f7c094a7"
version = "0.11.1"
weakdeps = ["CategoricalArrays"]
[deps.LossFunctions.extensions]
LossFunctionsCategoricalArraysExt = "CategoricalArrays"
[[deps.LoweredCodeUtils]]
deps = ["JuliaInterpreter"]
git-tree-sha1 = "60168780555f3e663c536500aa790b6368adc02a"
uuid = "6f1432cf-f94c-5a45-995e-cdbf5db27b0b"
version = "2.3.0"
[[deps.MLFlowClient]]
deps = ["Dates", "FilePathsBase", "HTTP", "JSON", "ShowCases", "URIs", "UUIDs"]
git-tree-sha1 = "32cee10a6527476bef0c6484ff4c60c2cead5d3e"
uuid = "64a0f543-368b-4a9a-827a-e71edb2a0b83"
version = "0.4.4"
[[deps.MLJ]]
deps = ["CategoricalArrays", "ComputationalResources", "Distributed", "Distributions", "LinearAlgebra", "MLJBase", "MLJEnsembles", "MLJFlow", "MLJIteration", "MLJModels", "MLJTuning", "OpenML", "Pkg", "ProgressMeter", "Random", "Reexport", "ScientificTypes", "Statistics", "StatsBase", "Tables"]
git-tree-sha1 = "193f1f1ac77d91eabe1ac81ff48646b378270eef"
uuid = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7"
version = "0.19.5"
[[deps.MLJBase]]
deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Dates", "DelimitedFiles", "Distributed", "Distributions", "InteractiveUtils", "InvertedIndices", "LinearAlgebra", "LossFunctions", "MLJModelInterface", "Missings", "OrderedCollections", "Parameters", "PrettyTables", "ProgressMeter", "Random", "ScientificTypes", "Serialization", "StatisticalTraits", "Statistics", "StatsBase", "Tables"]
git-tree-sha1 = "0b7307d1a7214ec3c0ba305571e713f9492ea984"
uuid = "a7f614a8-145f-11e9-1d2a-a57a1082229d"
version = "0.21.14"
[[deps.MLJEnsembles]]
deps = ["CategoricalArrays", "CategoricalDistributions", "ComputationalResources", "Distributed", "Distributions", "MLJBase", "MLJModelInterface", "ProgressMeter", "Random", "ScientificTypesBase", "StatsBase"]
git-tree-sha1 = "95b306ef8108067d26dfde9ff3457d59911cc0d6"
uuid = "50ed68f4-41fd-4504-931a-ed422449fee0"
version = "0.3.3"
[[deps.MLJFlow]]
deps = ["MLFlowClient", "MLJBase", "MLJModelInterface"]
git-tree-sha1 = "bceeeb648c9aa2fc6f65f957c688b164d30f2905"
uuid = "7b7b8358-b45c-48ea-a8ef-7ca328ad328f"
version = "0.1.1"
[[deps.MLJIteration]]
deps = ["IterationControl", "MLJBase", "Random", "Serialization"]
git-tree-sha1 = "be6d5c71ab499a59e82d65e00a89ceba8732fcd5"
uuid = "614be32b-d00c-4edb-bd02-1eb411ab5e55"
version = "0.5.1"
[[deps.MLJModelInterface]]
deps = ["Random", "ScientificTypesBase", "StatisticalTraits"]
git-tree-sha1 = "03ae109be87f460fe3c96b8a0dbbf9c7bf840bd5"
uuid = "e80e1ace-859a-464e-9ed9-23947d8ae3ea"
version = "1.9.2"
[[deps.MLJModels]]
deps = ["CategoricalArrays", "CategoricalDistributions", "Combinatorics", "Dates", "Distances", "Distributions", "InteractiveUtils", "LinearAlgebra", "MLJModelInterface", "Markdown", "OrderedCollections", "Parameters", "Pkg", "PrettyPrinting", "REPL", "Random", "RelocatableFolders", "ScientificTypes", "StatisticalTraits", "Statistics", "StatsBase", "Tables"]
git-tree-sha1 = "10d221910fc3f3eedad567178ddbca3cc0f776a3"
uuid = "d491faf4-2d78-11e9-2867-c94bc002c0b7"
version = "0.16.12"
[[deps.MLJTuning]]
deps = ["ComputationalResources", "Distributed", "Distributions", "LatinHypercubeSampling", "MLJBase", "ProgressMeter", "Random", "RecipesBase"]
git-tree-sha1 = "02688098bd77827b64ed8ad747c14f715f98cfc4"
uuid = "03970b2e-30c4-11ea-3135-d1576263f10f"
version = "0.7.4"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "9ee1618cbf5240e6d4e0371d6f24065083f60c48"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.11"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS]]
deps = ["Dates", "MbedTLS_jll", "MozillaCACerts_jll", "Random", "Sockets"]
git-tree-sha1 = "03a9b9718f5682ecb107ac9f7308991db4ce395b"
uuid = "739be429-bea8-5141-9913-cc70e7f3736d"
version = "1.1.7"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.2+0"
[[deps.Memoization]]
deps = ["MacroTools"]
git-tree-sha1 = "073f080e733bc6697411901224ed4fd15fefaffa"
uuid = "6fafb56a-5788-4b4e-91ca-c0cea6611c73"
version = "0.2.1"
[[deps.MicroCollections]]
deps = ["BangBang", "InitialValues", "Setfield"]
git-tree-sha1 = "629afd7d10dbc6935ec59b32daeb33bc4460a42e"
uuid = "128add7d-3638-4c79-886c-908ea0c25c34"
version = "0.1.4"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "f66bdc5de519e8f8ae43bdc598782d35a25b1272"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.1.0"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.ModalDecisionTrees]]
deps = ["AbstractTrees", "BenchmarkTools", "CategoricalArrays", "CategoricalDistributions", "DataFrames", "DataStructures", "DelimitedFiles", "Distributions", "FillArrays", "FunctionWrappers", "Logging", "LoggingExtras", "MLJ", "MLJBase", "MLJModelInterface", "Memoization", "OpenML", "Printf", "ProgressMeter", "Random", "Reexport", "ResumableFunctions", "Revise", "SoleBase", "SoleData", "SoleLogics", "SoleModels", "Statistics", "StatsBase", "Suppressor", "Tables", "ThreadsX"]
git-tree-sha1 = "7ad21a5a4c27da12cb4c8b0f76bb29a5048ffbe4"
uuid = "e54bda2e-c571-11ec-9d64-0242ac120002"
version = "0.1.7"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.10.11"
[[deps.NaNMath]]
deps = ["OpenLibm_jll"]
git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "1.0.2"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
version = "1.2.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.21+4"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+0"
[[deps.OpenML]]
deps = ["ARFFFiles", "HTTP", "JSON", "Markdown", "Pkg", "Scratch"]
git-tree-sha1 = "6efb039ae888699d5a74fb593f6f3e10c7193e33"
uuid = "8b6db2d4-7670-4922-a472-f9537c81ab66"
version = "0.3.1"
[[deps.OpenSSL]]
deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"]
git-tree-sha1 = "51901a49222b09e3743c65b8847687ae5fc78eb2"
uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c"
version = "1.4.1"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "ceeda72c9fd6bbebc4f4f598560789145a8b6c4c"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "3.0.11+0"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "2e73fe17cac3c62ad1aebe70d44c963c3cfdc3e3"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.6.2"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "fcf8fd477bd7f33cb8dbb1243653fb0d415c256c"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.25"
[[deps.Parameters]]
deps = ["OrderedCollections", "UnPack"]
git-tree-sha1 = "34c0e9ad262e5f7fc75b10a9952ca7692cfc5fbe"
uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a"
version = "0.12.3"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "716e24b21538abc91f6205fd1d8363f39b442851"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.7.2"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.9.0"
[[deps.PooledArrays]]
deps = ["DataAPI", "Future"]
git-tree-sha1 = "36d8b4b899628fb92c2749eb488d884a926614d3"
uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720"
version = "1.4.3"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "03b4c25b43cb84cee5c90aa9b5ea0a78fd848d2f"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.2.0"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00805cd429dcb4870060ff49ef443486c262e38e"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.1"
[[deps.PrettyPrinting]]
git-tree-sha1 = "22a601b04a154ca38867b991d5017469dc75f2db"
uuid = "54e16d92-306c-5ea0-a30b-337be88ac337"
version = "0.4.1"
[[deps.PrettyTables]]
deps = ["Crayons", "LaTeXStrings", "Markdown", "Printf", "Reexport", "StringManipulation", "Tables"]
git-tree-sha1 = "ee094908d720185ddbdc58dbe0c1cbe35453ec7a"
uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
version = "2.2.7"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[deps.ProgressLogging]]
deps = ["Logging", "SHA", "UUIDs"]
git-tree-sha1 = "80d919dee55b9c50e8d9e2da5eeafff3fe58b539"
uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
version = "0.1.4"
[[deps.ProgressMeter]]
deps = ["Distributed", "Printf"]
git-tree-sha1 = "00099623ffee15972c16111bcf84c58a0051257c"
uuid = "92933f4c-e287-5a05-a399-4b506db050ca"
version = "1.9.0"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "9ebcd48c498668c7fa0e97a9cae873fbee7bfee1"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.9.1"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.RecipesBase]]
deps = ["PrecompileTools"]
git-tree-sha1 = "5c3d09cc4f31f5fc6af001c250bf1278733100ff"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.3.4"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Referenceables]]
deps = ["Adapt"]
git-tree-sha1 = "e681d3bfa49cd46c3c161505caddf20f0e62aaa9"
uuid = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
version = "0.1.2"
[[deps.RelocatableFolders]]
deps = ["SHA", "Scratch"]
git-tree-sha1 = "ffdaf70d81cf6ff22c2b6e733c900c3321cab864"
uuid = "05181044-ff0b-4ac5-8273-598c1e38db00"
version = "1.0.1"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.3.0"
[[deps.ResumableFunctions]]
deps = ["MacroTools"]
git-tree-sha1 = "d0399c12a584c18ed77a351925cd07f5a9b32c36"
uuid = "c5292f4c-5179-55e1-98c5-05642aab7184"
version = "0.6.6"
[[deps.Revise]]
deps = ["CodeTracking", "Distributed", "FileWatching", "JuliaInterpreter", "LibGit2", "LoweredCodeUtils", "OrderedCollections", "Pkg", "REPL", "Requires", "UUIDs", "Unicode"]
git-tree-sha1 = "609c26951d80551620241c3d7090c71a73da75ab"
uuid = "295af30f-e4ad-537b-8983-00126c2a3abe"
version = "3.5.6"
[[deps.Rmath]]
deps = ["Random", "Rmath_jll"]
git-tree-sha1 = "f65dcb5fa46aee0cf9ed6274ccbd597adc49aa7b"
uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa"
version = "0.7.1"
[[deps.Rmath_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6ed52fdd3382cf21947b15e8870ac0ddbff736da"
uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f"
version = "0.4.0+0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.ScientificTypes]]
deps = ["CategoricalArrays", "ColorTypes", "Dates", "Distributions", "PrettyTables", "Reexport", "ScientificTypesBase", "StatisticalTraits", "Tables"]
git-tree-sha1 = "75ccd10ca65b939dab03b812994e571bf1e3e1da"
uuid = "321657f4-b219-11e9-178b-2701a2544e81"
version = "3.0.2"
[[deps.ScientificTypesBase]]
git-tree-sha1 = "a8e18eb383b5ecf1b5e6fc237eb39255044fd92b"
uuid = "30f210dd-8aff-4c5f-94ba-8e64358c1161"
version = "3.0.0"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "30449ee12237627992a99d5e30ae63e4d78cd24a"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.2.0"
[[deps.SentinelArrays]]
deps = ["Dates", "Random"]
git-tree-sha1 = "04bdff0b09c65ff3e06a05e3eb7b120223da3d39"
uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
version = "1.4.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Setfield]]
deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"]
git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac"
uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46"
version = "1.1.1"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.ShowCases]]
git-tree-sha1 = "7f534ad62ab2bd48591bdeac81994ea8c445e4a5"
uuid = "605ecd9f-84a6-4c9e-81e2-4798472b76a3"
version = "0.1.0"
[[deps.SimpleBufferStream]]
git-tree-sha1 = "874e8867b33a00e784c8a7e4b60afe9e037b74e1"
uuid = "777ac1f9-54b0-4bf8-805c-2214025038e7"
version = "1.1.0"
[[deps.SimpleTraits]]
deps = ["InteractiveUtils", "MacroTools"]
git-tree-sha1 = "5d7e3f4e11935503d3ecaf7186eac40602e7d231"
uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
version = "0.9.4"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SoleBase]]
deps = ["IterTools", "Logging", "Random"]
git-tree-sha1 = "ef80c88a87a76353335eec7b8aa39912ac6816a6"
uuid = "4475fa32-7023-44a0-aa70-4813b230e492"
version = "0.11.0"
[[deps.SoleData]]
deps = ["CSV", "Catch22", "DataFrames", "DataStructures", "Logging", "Random", "Reexport", "Revise", "ScientificTypes", "SoleBase", "Statistics"]
git-tree-sha1 = "320dd0551ef82953609ca7ea624608f8e1884a25"
uuid = "123f1ae1-6307-4526-ab5b-aab3a92a2b8c"
version = "0.10.1"
[[deps.SoleLogics]]
deps = ["DataStructures", "Dictionaries", "Graphs", "IterTools", "Lazy", "PrettyTables", "Random", "Reexport", "Revise", "SoleBase", "StatsBase", "ThreadSafeDicts"]
git-tree-sha1 = "5cd0b38b73972dc91ed97cd3bf2cc21b9378b05d"
uuid = "b002da8f-3cb3-4d91-bbe3-2953433912b5"
version = "0.5.0"
[[deps.SoleModels]]
deps = ["BenchmarkTools", "CSV", "CategoricalArrays", "DataFrames", "DataStructures", "FillArrays", "FunctionWrappers", "Graphs", "HTTP", "Lazy", "LinearAlgebra", "Logging", "MLJBase", "MLJModelInterface", "ProgressMeter", "Random", "Reexport", "Revise", "SoleBase", "SoleData", "SoleLogics", "StatsBase", "Suppressor", "Tables", "ThreadSafeDicts", "UniqueVectors", "ZipFile"]
git-tree-sha1 = "0abe20c82108697c82b904e8da986a64862ca8cc"
uuid = "4249d9c7-3290-4ddd-961c-e1d3ec2467f8"
version = "0.3.0"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "c60ec5c62180f27efea3ba2908480f8055e17cee"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.1.1"
[[deps.SparseArrays]]
deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "e2cfc4012a19088254b3950b85c3c1d8882d864d"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.3.1"
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
[deps.SpecialFunctions.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
[[deps.SplittablesBase]]
deps = ["Setfield", "Test"]
git-tree-sha1 = "e08a62abc517eb79667d0a29dc08a3b589516bb5"
uuid = "171d559e-b47b-412a-8079-5efa626c420e"
version = "0.1.15"
[[deps.StableRNGs]]
deps = ["Random", "Test"]
git-tree-sha1 = "3be7d49667040add7ee151fefaf1f8c04c8c8276"
uuid = "860ef19b-820b-49d6-a774-d7a799459cd3"
version = "1.0.0"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "StaticArraysCore"]
git-tree-sha1 = "0adf069a2a490c47273727e029371b31d44b72b2"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.6.5"
weakdeps = ["Statistics"]
[deps.StaticArrays.extensions]
StaticArraysStatisticsExt = "Statistics"
[[deps.StaticArraysCore]]
git-tree-sha1 = "36b3d696ce6366023a0ea192b4cd442268995a0d"
uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c"
version = "1.4.2"
[[deps.StatisticalTraits]]
deps = ["ScientificTypesBase"]
git-tree-sha1 = "30b9236691858e13f167ce829490a68e1a597782"
uuid = "64bff920-2084-43da-a3e6-9bb72801c0c9"
version = "3.2.0"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
version = "1.9.0"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "1ff449ad350c9c4cbc756624d6f8a8c3ef56d3ed"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.7.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "d1bf48bfcc554a3761a133fe3a9bb01488e06916"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.21"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "f625d686d5a88bcd2b15cd81f18f98186fdc0c9a"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.0"
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"
StatsFunsInverseFunctionsExt = "InverseFunctions"
[deps.StatsFuns.weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112"
[[deps.StringManipulation]]
deps = ["PrecompileTools"]
git-tree-sha1 = "a04cabe79c5f01f4d723cc6704070ada0b9d46d5"
uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e"
version = "0.3.4"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.SuiteSparse_jll]]
deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"]
uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c"
version = "5.10.1+6"
[[deps.Suppressor]]
deps = ["Logging"]
git-tree-sha1 = "6cd9e4a207964c07bf6395beff7a1e8f21d0f3b2"
uuid = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
version = "0.2.6"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.3"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "OrderedCollections", "TableTraits"]
git-tree-sha1 = "a1f34829d5ac0ef499f6d84428bd6b4c71f02ead"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.11.0"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.ThreadSafeDicts]]
git-tree-sha1 = "cdc778da600ff2166239a80cf4d82a9b118611d8"
uuid = "4239201d-c60e-5e0a-9702-85d713665ba7"
version = "0.1.3"
[[deps.ThreadsX]]
deps = ["ArgCheck", "BangBang", "ConstructionBase", "InitialValues", "MicroCollections", "Referenceables", "Setfield", "SplittablesBase", "Transducers"]
git-tree-sha1 = "34e6bcf36b9ed5d56489600cf9f3c16843fa2aa2"
uuid = "ac1d9e8a-700a-412c-b207-f0111f4b6c0d"
version = "0.1.11"
[[deps.TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "9a6ae7ed916312b41236fcef7e0af564ef934769"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.13"
[[deps.Transducers]]
deps = ["Adapt", "ArgCheck", "BangBang", "Baselet", "CompositionsBase", "ConstructionBase", "DefineSingletons", "Distributed", "InitialValues", "Logging", "Markdown", "MicroCollections", "Requires", "Setfield", "SplittablesBase", "Tables"]
git-tree-sha1 = "53bd5978b182fa7c57577bdb452c35e5b4fb73a5"
uuid = "28d57a85-8fef-5791-bfe6-a80928e7c999"
version = "0.4.78"
[deps.Transducers.extensions]
TransducersBlockArraysExt = "BlockArrays"
TransducersDataFramesExt = "DataFrames"
TransducersLazyArraysExt = "LazyArrays"
TransducersOnlineStatsBaseExt = "OnlineStatsBase"
TransducersReferenceablesExt = "Referenceables"
[deps.Transducers.weakdeps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
OnlineStatsBase = "925886fa-5bf2-5e8e-b522-a9147a512338"
Referenceables = "42d2dcc6-99eb-4e98-b66c-637b7d73030e"
[[deps.URIs]]
git-tree-sha1 = "b7a5e99f24892b6824a954199a45e9ffcc1c70f0"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.5.0"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.UnPack]]
git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b"
uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"
version = "1.0.2"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UniqueVectors]]
git-tree-sha1 = "0a150de447f51342cf2e5b379137b823f3934864"
uuid = "2fbcfb34-fd0c-5fbb-b5d7-e826d8f5b0a9"
version = "1.2.0"
[[deps.WeakRefStrings]]
deps = ["DataAPI", "InlineStrings", "Parsers"]
git-tree-sha1 = "b1be2855ed9ed8eac54e5caff2afcdb442d52c23"
uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5"
version = "1.4.2"
[[deps.WorkerUtilities]]
git-tree-sha1 = "cd1659ba0d57b71a464a29e64dbc67cfe83d54e7"
uuid = "76eceee3-57b5-4d4a-8e66-0e911cebbf60"
version = "1.6.1"
[[deps.ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "f492b7fe1698e623024e873244f10d89c95c340a"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.10.1"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.13+0"
[[deps.catch22_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7cfb827b3f62e20de3ccebaabf468ea979d098d9"
uuid = "8a07c0c5-99ad-56cb-bc82-72eed1bb61ce"
version = "0.4.0+0"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.7.0+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"
"""
# ╔═╡ Cell order:
# ╠═65141ec2-4da9-11ee-2a0a-8974a3ec37da
# ╠═dea67564-e905-4a49-9717-ca8071a4d489
# ╠═9ee33338-6985-4745-ab74-7caeac73496e
# ╠═99b1da9b-517a-4a84-8d0c-26e9f9ff6e15
# ╠═a00c4abe-5534-4c42-9464-d5a6867b9802
# ╠═7655458a-d5af-44d9-9ff6-74db67f3d5d4
# ╠═66ec0d97-a221-4363-a21d-1c15835645d2
# ╠═b6d6ed79-9abc-47a6-bbbc-1a1610ffb830
# ╠═00310a1c-c4f4-43bc-a60c-a6113434f242
# ╠═b3417b90-c910-407a-939b-5190602c5899
# ╠═3cdf0a35-edd0-4a02-9410-94e828d0f519
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 942 | using ModalDecisionTrees
using Documenter
# Make `jldoctest` blocks in docstrings run with the package already loaded.
DocMeta.setdocmeta!(
    ModalDecisionTrees,
    :DocTestSetup,
    :(using ModalDecisionTrees);
    recursive=true,
)

# HTML output configuration for the generated site.
html_format = Documenter.HTML(;
    size_threshold = 4000000,
    prettyurls = get(ENV, "CI", "false") == "true",
    canonical = "https://aclai-lab.github.io/ModalDecisionTrees.jl",
    assets = String[],
)

# Build the documentation.
makedocs(;
    sitename = "ModalDecisionTrees.jl",
    modules = [ModalDecisionTrees, ModalDecisionTrees.MLJInterface, ModalDecisionTrees.experimentals],
    authors = "Federico Manzella, Giovanni Pagliarini, Eduard I. Stan",
    repo = Documenter.Remotes.GitHub("aclai-lab", "ModalDecisionTrees.jl"),
    format = html_format,
    pages = [
        "Home" => "index.md",
    ],
    warnonly = true, # TODO remove?
)

# Push the built site to the gh-pages branch.
deploydocs(;
    repo = "github.com/aclai-lab/ModalDecisionTrees.jl",
    target = "build",
    branch = "gh-pages",
    versions = ["main" => "main", "stable" => "v^", "v#.#", "dev" => "dev"],
)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 437 | using Lazy
# TODO fix citation.
"""
Recursion state for ModalCART (see paper On The Foundations of Modal Decision Trees)
"""
abstract type MCARTState end
"""
TODO document
vector of current worlds for each instance and modality
"""
struct RestrictedMCARTState{WS<:AbstractVector{WST} where {WorldType,WST<:Vector{WorldType}}} <: MCARTState
witnesses::WS
end
struct FullMCARTState{ANC<:Vector} <: MCARTState
ancestors::ANC
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 60446 | # The code in this file for decision tree learning is inspired from:
# - Ben Sadeghi's DecisionTree.jl (released under the MIT license);
# - scikit-learn's and numpy's (released under the 3-Clause BSD license);
# Also thanks to Poom Chiarawongse <[email protected]>
##############################################################################
##############################################################################
##############################################################################
##############################################################################
"""
Training-time decision-tree node used by ModalCART.

Carries both the tentative split information and the bookkeeping needed during
recursion (instance region, depth, modal depth, best-split score). Converted to
a bare `DTLeaf`/`DTInternal` model by `_convert`.

Type parameters: `L` label type, `P` purity/score type, `D` decision type.
"""
mutable struct NodeMeta{L<:Label,P,D<:AbstractDecision} <: AbstractNode{L}
    region :: UnitRange{Int} # a slice of the instances used to decide the split of the node
    depth :: Int # depth of this node in the tree (root = 0)
    modaldepth :: Int # number of modal steps taken from the root to this node
    # worlds :: AbstractVector{Worlds{W}} # current set of worlds for each training instance
    purity :: P # purity grade attained at training time
    prediction :: L # most likely label
    is_leaf :: Bool # whether this is a leaf node, or a split one
    # split node-only properties
    split_at :: Int # index (within `region`) where the left/right partition splits, as set by `partition!`
    parent :: Union{Nothing,NodeMeta{L,P,D}} # parent node (`nothing` for the root)
    l :: NodeMeta{L,P,D} # left child node
    r :: NodeMeta{L,P,D} # right child node
    purity_times_nt :: P # split objective (purity × total node weight) of the best split found so far
    consistency :: Any # data for the (currently disabled) split-consistency sanity check
    i_modality :: ModalityId # modality id of the best decision found
    decision :: D # best decision (split test) found for this node
    onlyallowglobal:: Vector{Bool} # per-modality flag: only global decisions allowed (e.g. when starting without a world)
    function NodeMeta{L,P,D}(
        region :: UnitRange{Int},
        depth :: Int,
        modaldepth :: Int,
        oura :: Vector{Bool},
    ) where {L<:Label,P,D<:AbstractDecision}
        # Partially-initialized node: split-related fields are filled in later
        # by `optimize_node!`.
        node = new{L,P,D}()
        node.region = region
        node.depth = depth
        node.modaldepth = modaldepth
        node.purity = P(NaN) # sentinel: purity not yet computed
        node.is_leaf = false
        node.parent = nothing
        node.onlyallowglobal = oura
        node
    end
end
include("ModalCART-states.jl")
"""
Return `true` iff `node` is the left child of `parent`.
"""
isleftchild(node::NodeMeta, parent::NodeMeta) = (node == parent.l)

"""
Return `true` iff `node` is the right child of `parent`.
"""
isrightchild(node::NodeMeta, parent::NodeMeta) = (node == parent.r)
"""
Walk up from `node` through consecutive right-child links and return the
highest node reached, i.e. the first node on the upward path that is not
itself a right child (possibly the root, or `node` itself).
"""
function lastrightancestor(node::NodeMeta)
    ancestor = node
    p = ancestor.parent
    while !isnothing(p) && isrightchild(ancestor, p)
        ancestor = p
        p = ancestor.parent
    end
    return ancestor
end
"""
Turn `node` into a leaf by raising its `is_leaf` flag.

Split-related fields (`i_modality`, `decision`, `purity_times_nt`,
`consistency`) are left unchanged.
"""
function makeleaf!(node::NodeMeta)
    return node.is_leaf = true
end
# Conversion: NodeMeta (node + training info) -> DTNode (bare decision tree model)
"""
Convert a training-time `NodeMeta` (classification case) into a bare decision
tree model (`DTLeaf`/`DTInternal`), recursively converting children and
mapping thresholds back through `threshold_backmap`.
"""
function _convert(
    node :: NodeMeta,
    labels :: AbstractVector{L},
    class_names :: AbstractVector{L},
    threshold_backmap :: Vector{<:Function}
) where {L<:CLabel}
    # Every node carries a leaf holding its majority class and the supporting labels.
    leaf = DTLeaf(class_names[node.prediction], labels[node.region])
    node.is_leaf && return leaf
    # Internal node: recurse on children and wrap everything in a DTInternal.
    lchild = _convert(node.l, labels, class_names, threshold_backmap)
    rchild = _convert(node.r, labels, class_names, threshold_backmap)
    return DTInternal(
        node.i_modality,
        RestrictedDecision(node.decision, threshold_backmap[node.i_modality]),
        leaf,
        lchild,
        rchild,
    )
end
# Conversion: NodeMeta (node + training info) -> DTNode (bare decision tree model)
"""
Convert a training-time `NodeMeta` (regression case) into a bare decision tree
model (`DTLeaf`/`DTInternal`), recursively converting children and mapping
thresholds back through `threshold_backmap`.
"""
function _convert(
    node :: NodeMeta,
    labels :: AbstractVector{L},
    threshold_backmap :: Vector{<:Function}
) where {L<:RLabel}
    # Every node carries a leaf holding its prediction and the supporting labels.
    leaf = DTLeaf(node.prediction, labels[node.region])
    node.is_leaf && return leaf
    # Internal node: recurse on children and wrap everything in a DTInternal.
    lchild = _convert(node.l, labels, threshold_backmap)
    rchild = _convert(node.r, labels, threshold_backmap)
    return DTInternal(
        node.i_modality,
        RestrictedDecision(node.decision, threshold_backmap[node.i_modality]),
        leaf,
        lchild,
        rchild,
    )
end
##############################################################################
##############################################################################
##############################################################################
##############################################################################
# function optimize_tree_parameters!(
# X :: DimensionalLogiset{T,N},
# initcond :: InitialCondition,
# allow_global_splits :: Bool,
# test_operators :: AbstractVector{<:TestOperator}
# ) where {T,N}
# # A dimensional ontological datasets:
# # flatten to adimensional case + strip of all relations from the ontology
# if prod(maxchannelsize(X)) == 1
# if (length(ontology(X).relations) > 0)
# @warn "The DimensionalLogise't provided has degenerate maxchannelsize $(maxchannelsize(X)), and more than 0 relations: $(ontology(X).relations)."
# end
# # X = DimensionalLogiset{T,0}(DimensionalDatasets.strip_ontology(ontology(X)), @views DimensionalDatasets.strip_domain(domain(X)))
# end
# ontology_relations = deepcopy(ontology(X).relations)
# # Fix test_operators order
# test_operators = unique(test_operators)
# DimensionalDatasets.sort_test_operators!(test_operators)
# # Adimensional operators:
# # in the adimensional case, some pairs of operators (e.g. <= and >)
# # are complementary, and thus it is redundant to check both at the same node.
# # We avoid this by only keeping one of the two operators.
# if prod(maxchannelsize(X)) == 1
# # No ontological relation
# ontology_relations = []
# if test_operators ⊆ DimensionalDatasets.all_lowlevel_test_operators
# test_operators = [canonical_geq]
# # test_operators = filter(e->e ≠ canonical_geq,test_operators)
# else
# @warn "Test operators set includes non-lowlevel test operators. Update this part of the code accordingly."
# end
# end
# # Softened operators:
# # when the largest world only has a few values, softened operators fallback
# # to being hard operators
# # max_world_wratio = 1/prod(maxchannelsize(X))
# # if canonical_geq in test_operators
# # test_operators = filter((e)->(!(e isa CanonicalConditionGeqSoft) || e.alpha < 1-max_world_wratio), test_operators)
# # end
# # if canonical_leq in test_operators
# # test_operators = filter((e)->(!(e isa CanonicalConditionLeqSoft) || e.alpha < 1-max_world_wratio), test_operators)
# # end
# # Binary relations (= unary modal connectives)
# # Note: the identity relation is the first, and it is the one representing
# # propositional splits.
# if identityrel in ontology_relations
# error("Found identityrel in ontology provided. No need.")
# # ontology_relations = filter(e->e ≠ identityrel, ontology_relations)
# end
# if globalrel in ontology_relations
# error("Found globalrel in ontology provided. Use allow_global_splits = true instead.")
# # ontology_relations = filter(e->e ≠ globalrel, ontology_relations)
# # allow_global_splits = true
# end
# relations = [identityrel, globalrel, ontology_relations...]
# relationId_id = 1
# relationGlob_id = 2
# ontology_relation_ids = map((x)->x+2, 1:length(ontology_relations))
# compute_globmemoset = (allow_global_splits || (initcond == ModalDecisionTrees.start_without_world))
# # Modal relations to compute gammas for
# inUseRelation_ids = if compute_globmemoset
# [relationGlob_id, ontology_relation_ids...]
# else
# ontology_relation_ids
# end
# # Relations to use at each split
# availableRelation_ids = []
# push!(availableRelation_ids, relationId_id)
# if allow_global_splits
# push!(availableRelation_ids, relationGlob_id)
# end
# availableRelation_ids = [availableRelation_ids..., ontology_relation_ids...]
# (
# test_operators, relations,
# relationId_id, relationGlob_id,
# inUseRelation_ids, availableRelation_ids
# )
# end
# DEBUGprintln = println
############################################################################################
############################################################################################
############################################################################################
# TODO restore resumable. Unfortunately this yields "UndefRefError: access to undefined reference"
# Base.@propagate_inbounds @resumable function generate_relevant_decisions(
# Enumerate all candidate decisions for `node`, across every modality of `Xs`.
# For each modality, a random subset of features and relations is sampled
# (random-forest-style subsampling, driven by `rng`), and for each surviving
# (relation, metacondition) pair the per-instance aggregated thresholds are
# collected. Returns a vector of tuples
# `(i_modality, decision_instantiator, test_operator, aggr_thresholds)`,
# where `decision_instantiator` maps a threshold value to a RestrictedDecision.
function generate_relevant_decisions(
    Xs,
    Sfs,
    n_subrelations,
    n_subfeatures,
    allow_global_splits,
    node,
    rng,
    max_modal_depth,
    idxs,
    region,
    grouped_featsaggrsnopss,
    grouped_featsnaggrss,
)
    out = []
    @inbounds for (i_modality,
        (X,
        modality_Sf,
        modality_n_subrelations::Function,
        modality_n_subfeatures,
        modality_allow_global_splits,
        modality_onlyallowglobal)
    ) in enumerate(zip(eachmodality(Xs), Sfs, n_subrelations, n_subfeatures, allow_global_splits, node.onlyallowglobal))
        @logmsg LogDetail " Modality $(i_modality)/$(nmodalities(Xs))"
        allow_propositional_decisions, allow_modal_decisions, allow_global_decisions, modal_relations_inds, features_inds = begin
            # Derive subset of features to consider
            # Note: using "sample" function instead of "randperm" allows to insert weights for features which may be wanted in the future
            features_inds = StatsBase.sample(rng, 1:nfeatures(X), modality_n_subfeatures, replace = false)
            sort!(features_inds)
            # Derive all available relations.
            # With OneWorld (propositional) modalities only propositional splits
            # make sense; when `onlyallowglobal` is set, only global splits do.
            allow_propositional_decisions, allow_modal_decisions, allow_global_decisions = begin
                if worldtype(X) == OneWorld
                    true, false, false
                elseif modality_onlyallowglobal
                    false, false, true
                else
                    true, true, modality_allow_global_splits
                end
            end
            # Honor the maximum modal depth constraint.
            if !isnothing(max_modal_depth) && max_modal_depth <= node.modaldepth
                allow_modal_decisions = false
            end
            n_tot_relations = 0
            if allow_modal_decisions
                n_tot_relations += length(relations(X))
            end
            # The global relation, when allowed, is appended as the last index.
            if allow_global_decisions
                n_tot_relations += 1
            end
            # Derive subset of relations to consider
            n_subrel = Int(modality_n_subrelations(n_tot_relations))
            modal_relations_inds = StatsBase.sample(rng, 1:n_tot_relations, n_subrel, replace = false)
            sort!(modal_relations_inds)
            # Check whether the global relation survived
            if allow_global_decisions
                allow_global_decisions = (n_tot_relations in modal_relations_inds)
                modal_relations_inds = filter!(r->r≠n_tot_relations, modal_relations_inds)
                n_tot_relations = length(modal_relations_inds)
            end
            allow_propositional_decisions, allow_modal_decisions, allow_global_decisions, modal_relations_inds, features_inds
        end
        @inbounds for (relation, metacondition, test_op, aggr_thresholds) in generate_decisions(
            X,
            idxs[region],
            modality_Sf,
            allow_propositional_decisions,
            allow_modal_decisions,
            allow_global_decisions,
            modal_relations_inds,
            features_inds,
            grouped_featsaggrsnopss[i_modality],
            grouped_featsnaggrss[i_modality],
        )
            # Closure building the (existential, scalar) decision for a given threshold.
            decision_instantiator = _threshold->begin
                cond = ScalarCondition(metacondition, _threshold)
                RestrictedDecision(ScalarExistentialFormula(relation, cond))
            end
            push!(out, (i_modality, decision_instantiator, test_op, aggr_thresholds))
            # @yield i_modality, decision_instantiator, test_op, aggr_thresholds
        end # END decisions
    end # END modality
    return out
end
############################################################################################
############################################################################################
############################################################################################
# Split a node
# Find an optimal local split satisfying the given constraints
# (e.g. max_depth, min_samples_leaf, etc.)
# Evaluate every candidate decision for `node` (across modalities, relations,
# features and thresholds), keep the best one, and either perform the split —
# partitioning `idxs[node.region]` and creating `node.l`/`node.r` — or mark the
# node as a leaf when no split honors the stopping constraints.
Base.@propagate_inbounds @inline function optimize_node!(
    node :: NodeMeta{L,P,D}, # node to split
    Xs :: MultiLogiset, # modal dataset
    Y :: AbstractVector{L}, # label vector
    W :: AbstractVector{U}, # weight vector
    grouped_featsaggrsnopss :: AbstractVector{<:AbstractVector{<:AbstractDict{<:Aggregator,<:AbstractVector{<:ScalarMetaCondition}}}},
    grouped_featsnaggrss :: AbstractVector{<:AbstractVector{<:AbstractVector{<:Tuple{<:Integer,<:Aggregator}}}},
    lookahead_depth :: Integer,
    ##########################################################################
    Ss :: AbstractVector{S}, # per-modality recursion state (world sets)
    ##########################################################################
    _is_classification :: Union{Val{true},Val{false}},
    _using_lookahead :: Union{Val{true},Val{false}},
    _perform_consistency_check:: Union{Val{true},Val{false}},
    ##########################################################################
    ;
    # Logic-agnostic training parameters
    loss_function :: Loss,
    lookahead :: Integer, # maximum depth of the tree to locally optimize for
    max_depth :: Union{Nothing,Int}, # maximum depth of the resultant tree
    min_samples_leaf :: Int, # minimum number of instances each leaf needs to have
    min_purity_increase :: AbstractFloat, # minimum purity increase needed for a split
    max_purity_at_leaf :: AbstractFloat, # maximum purity allowed on a leaf
    ##########################################################################
    # Modal parameters
    max_modal_depth :: Union{Nothing,Int}, # maximum modal depth of the resultant tree
    n_subrelations :: AbstractVector{NSubRelationsFunction}, # relations used for the decisions
    n_subfeatures :: AbstractVector{Int}, # number of features for the decisions
    allow_global_splits :: AbstractVector{Bool}, # allow/disallow using globalrel at any decisional node
    ##########################################################################
    # Other
    idxs :: AbstractVector{Int},
    n_classes :: Int,
    rng :: Random.AbstractRNG,
) where{P,L<:_Label,D<:AbstractDecision,U,NSubRelationsFunction<:Function,S<:MCARTState}
    # Region of idxs to use to perform the split
    region = node.region
    _ninstances = length(region)
    r_start = region.start - 1
    # DEBUGprintln("optimize_node!"); readline()
    # Gather all values needed for the current set of instances
    # TODO also slice the dataset?
    @inbounds Yf = Y[idxs[region]]
    @inbounds Wf = W[idxs[region]]
    # Yf = Vector{L}(undef, _ninstances)
    # Wf = Vector{U}(undef, _ninstances)
    # @inbounds @simd for i in 1:_ninstances
    # Yf[i] = Y[idxs[i + r_start]]
    # Wf[i] = W[idxs[i + r_start]]
    # end
    ############################################################################
    # Prepare counts (class counts / label sums), and the node's own purity
    # and prediction before any split.
    ############################################################################
    if isa(_is_classification, Val{true})
        (nc, nt), (node.purity, node.prediction) = begin
            nc = fill(zero(U), n_classes)
            @inbounds @simd for i in 1:_ninstances
                nc[Yf[i]] += Wf[i]
            end
            nt = sum(nc)
            # TODO use _compute_purity
            purity = loss_function(loss_function(nc, nt)::Float64)::Float64
            # Assign the most likely label before the split
            prediction = argmax(nc)
            # prediction = bestguess(Yf)
            (nc, nt), (purity, prediction)
        end
    else
        sums, (tsum, nt),
        (node.purity, node.prediction) = begin
            # sums = [Wf[i]*Yf[i] for i in 1:_ninstances]
            sums = Yf
            # ssqs = [Wf[i]*Yf[i]*Yf[i] for i in 1:_ninstances]
            # tssq = zero(U)
            # tssq = sum(ssqs)
            # tsum = zero(U)
            tsum = sum(sums)
            # nt = zero(U)
            nt = sum(Wf)
            # @inbounds @simd for i in 1:_ninstances
            # # tssq += Wf[i]*Yf[i]*Yf[i]
            # # tsum += Wf[i]*Yf[i]
            # nt += Wf[i]
            # end
            # purity = (tsum * prediction) # TODO use loss function
            # purity = tsum * tsum # TODO use loss function
            # tmean = tsum/nt
            # purity = -((tssq - 2*tmean*tsum + (tmean^2*nt)) / (nt-1)) # TODO use loss function
            # TODO use _compute_purity
            purity = begin
                if W isa Ones{Int}
                    loss_function(loss_function(sums, tsum, length(sums))::Float64)
                else
                    loss_function(loss_function(sums, Wf, nt)::Float64)
                end
            end
            # Assign the most likely label before the split
            prediction = tsum / nt
            # prediction = bestguess(Yf)
            sums, (tsum, nt), (purity, prediction)
        end
    end
    ############################################################################
    ############################################################################
    ############################################################################
    @logmsg LogDebug "_split!(...) " _ninstances region nt
    ############################################################################
    # Preemptive leaf conditions
    ############################################################################
    if isa(_is_classification, Val{true})
        if (
            # If all instances belong to the same class, make this a leaf
            (nc[node.prediction] == nt)
            # No binary split can honor min_samples_leaf if there are not as many as
            # min_samples_leaf*2 instances in the first place
            || (min_samples_leaf * 2 > _ninstances)
            # If the node is pure enough, avoid splitting # TODO rename purity to loss
            || (node.purity > max_purity_at_leaf)
            # Honor maximum depth constraint
            || (!isnothing(max_depth) && max_depth <= node.depth))
            # DEBUGprintln("BEFORE LEAF!")
            # DEBUGprintln(nc[node.prediction])
            # DEBUGprintln(nt)
            # DEBUGprintln(min_samples_leaf)
            # DEBUGprintln(_ninstances)
            # DEBUGprintln(node.purity)
            # DEBUGprintln(max_purity_at_leaf)
            # DEBUGprintln(max_depth)
            # DEBUGprintln(node.depth)
            # readline()
            node.is_leaf = true
            # @logmsg LogDetail "leaf created: " (min_samples_leaf * 2 > _ninstances) (nc[node.prediction] == nt) (node.purity > max_purity_at_leaf) (max_depth <= node.depth)
            return
        end
    else
        if (
            # No binary split can honor min_samples_leaf if there are not as many as
            # min_samples_leaf*2 instances in the first place
            (min_samples_leaf * 2 > _ninstances)
            # equivalent to old_purity > -1e-7
            || (node.purity > max_purity_at_leaf) # TODO
            # || (tsum * node.prediction > -1e-7 * nt + tssq)
            # Honor maximum depth constraint
            || (!isnothing(max_depth) && max_depth <= node.depth))
            node.is_leaf = true
            # @logmsg LogDetail "leaf created: " (min_samples_leaf * 2 > _ninstances) (tsum * node.prediction > -1e-7 * nt + tssq) (tsum * node.prediction) (-1e-7 * nt + tssq) (max_depth <= node.depth)
            return
        end
    end
    ########################################################################################
    ########################################################################################
    ########################################################################################
    # TODO try this solution for rsums and lsums (regression case)
    # rsums = Vector{U}(undef, _ninstances)
    # lsums = Vector{U}(undef, _ninstances)
    # @simd for i in 1:_ninstances
    # rsums[i] = zero(U)
    # lsums[i] = zero(U)
    # end
    # Slice the per-modality world sets down to the current region's instances.
    if eltype(Ss) <: RestrictedMCARTState
        # TODO @view
        Sfs = Vector{Vector{WST} where {WorldType,WST<:Vector{WorldType}}}(undef, nmodalities(Xs))
        for (i_modality,WT) in enumerate(worldtype.(eachmodality(Xs)))
            Sfs[i_modality] = Vector{Vector{WT}}(undef, _ninstances)
            @simd for i in 1:_ninstances
                Sfs[i_modality][i] = Ss[i_modality].witnesses[idxs[i + r_start]]
            end
        end
    end
    ########################################################################################
    ########################################################################################
    ########################################################################################
    is_lookahead_basecase = (isa(_using_lookahead, Val{true}) && lookahead_depth == lookahead)
    performing_consistency_check = (isa(_perform_consistency_check, Val{true}) || is_lookahead_basecase)
    # Commit the currently-stored best split on `node`: check the purity-increase
    # constraint, take the modal step (updating the world sets in `Ss`), partition
    # `idxs`, and create the two children. Returns `false` when the node is turned
    # into a leaf instead.
    function splitnode!(node, Ss, idxs)
        # TODO, actually, when using Shannon entropy, we must correct the purity:
        corrected_this_purity_times_nt = loss_function(node.purity_times_nt)::Float64
        # DEBUGprintln("corrected_this_purity_times_nt: $(corrected_this_purity_times_nt)")
        # DEBUGprintln(min_purity_increase)
        # DEBUGprintln(node.purity)
        # DEBUGprintln(corrected_this_purity_times_nt)
        # DEBUGprintln(nt)
        # DEBUGprintln(purity - node.purity_times_nt/nt)
        # DEBUGprintln("dishonor: $(dishonor_min_purity_increase(L, min_purity_increase, node.purity, corrected_this_purity_times_nt, nt))")
        # readline()
        # println("corrected_this_purity_times_nt = $(corrected_this_purity_times_nt)")
        # println("nt = $(nt)")
        # println("node.purity = $(node.purity)")
        # println("corrected_this_purity_times_nt / nt - node.purity = $(corrected_this_purity_times_nt / nt - node.purity)")
        # println("min_purity_increase * nt = $(min_purity_increase) * $(nt) = $(min_purity_increase * nt)")
        # @logmsg LogOverview "purity_times_nt increase" corrected_this_purity_times_nt/nt node.purity (corrected_this_purity_times_nt/nt + node.purity) (node.purity_times_nt/nt - node.purity)
        # If the best split is good, partition and split accordingly
        @inbounds if ((
            corrected_this_purity_times_nt == typemin(P)) ||
            dishonor_min_purity_increase(L, min_purity_increase, node.purity, corrected_this_purity_times_nt, nt)
        )
            # if isa(_is_classification, Val{true})
            # @logmsg LogDebug " Leaf" corrected_this_purity_times_nt min_purity_increase (corrected_this_purity_times_nt/nt) node.purity ((corrected_this_purity_times_nt/nt) - node.purity)
            # else
            # @logmsg LogDebug " Leaf" corrected_this_purity_times_nt tsum node.prediction min_purity_increase nt (corrected_this_purity_times_nt / nt - tsum * node.prediction) (min_purity_increase * nt)
            # end
            makeleaf!(node)
            return false
        end
        # Compute new world sets (= take a modal step)
        # println(decision_str)
        decision_str = displaydecision(node.i_modality, node.decision)
        # TODO instead of using memory, here, just use two opposite indices and perform substitutions. indj = _ninstances
        post_unsatisfied = fill(1, _ninstances)
        if performing_consistency_check
            world_refs = []
        end
        for i_instance in 1:_ninstances
            # TODO perform step with an OntologicalModalDataset
            X = modality(Xs, node.i_modality)
            # instance = DimensionalDatasets.get_instance(X, idxs[i_instance + r_start])
            # println(instance)
            # println(Sfs[node.i_modality][i_instance])
            _sat, _ss = modalstep(X, idxs[i_instance + r_start], Sfs[node.i_modality][i_instance], node.decision)
            # _sat, _ss = modalstep(X, idxs[i_instance + r_start], Ss[node.i_modality][idxs[i_instance + r_start]], node.decision)
            (issat,Ss[node.i_modality].witnesses[idxs[i_instance + r_start]]) = _sat, _ss
            # @logmsg LogDetail " [$issat] Instance $(i_instance)/$(_ninstances)" Sfs[node.i_modality][i_instance] (if issat Ss[node.i_modality][idxs[i_instance + r_start]] end)
            # println(issat)
            # println(Ss[node.i_modality][idxs[i_instance + r_start]])
            # readline()
            # I'm using unsatisfied because sorting puts YES instances first, but TODO use the inverse sorting and use issat flag instead
            post_unsatisfied[i_instance] = !issat
            if performing_consistency_check
                push!(world_refs, _ss)
            end
        end
        @logmsg LogDetail " post_unsatisfied" post_unsatisfied
        # if length(unique(post_unsatisfied)) == 1
        # @warn "An uninformative split was reached. Something's off\nPurity: $(node.purity)\nSplit: $(decision_str)\nUnsatisfied flags: $(post_unsatisfied)"
        # makeleaf!(node)
        # return false
        # end
        @logmsg LogDetail " Branch ($(sum(post_unsatisfied))+$(_ninstances-sum(post_unsatisfied))=$(_ninstances) instances) at modality $(node.i_modality) with decision: $(decision_str), purity $(node.purity)"
        # if sum(post_unsatisfied) >= min_samples_leaf && (_ninstances - sum(post_unsatisfied)) >= min_samples_leaf
        # DEBUGprintln("LEAF!")
        # makeleaf!(node)
        # return false
        # end
        ########################################################################################
        ########################################################################################
        ########################################################################################
        # Check consistency
        consistency = begin
            if performing_consistency_check
                post_unsatisfied
            else
                sum(Wf[BitVector(post_unsatisfied)])
            end
        end
        # @logmsg LogDetail " post_unsatisfied" post_unsatisfied
        # if !isapprox(node.consistency, consistency; atol=eps(Float16), rtol=eps(Float16))
        # errStr = ""
        # errStr *= "A low-level error occurred. Please open a pull request with the following info."
        # errStr *= "Decision $(node.decision).\n"
        # errStr *= "Possible causes:\n"
        # errStr *= "- feature returning NaNs\n"
        # errStr *= "- erroneous representatives for relation $(relation(node.decision)), aggregator $(existential_aggregator(test_operator(node.decision))) and feature $(feature(node.decision))\n"
        # errStr *= "\n"
        # errStr *= "Branch ($(sum(post_unsatisfied))+$(_ninstances-sum(post_unsatisfied))=$(_ninstances) instances) at modality $(node.i_modality) with decision: $(decision_str), purity $(node.purity)\n"
        # errStr *= "$(length(idxs[region])) Instances: $(idxs[region])\n"
        # errStr *= "Different partition was expected:\n"
        # if performing_consistency_check
        # errStr *= "Actual: $(consistency) ($(sum(consistency)))\n"
        # errStr *= "Expected: $(node.consistency) ($(sum(node.consistency)))\n"
        # diff = node.consistency.-consistency
        # errStr *= "Difference: $(diff) ($(sum(abs.(diff))))\n"
        # else
        # errStr *= "Actual: $(consistency)\n"
        # errStr *= "Expected: $(node.consistency)\n"
        # diff = node.consistency-consistency
        # errStr *= "Difference: $(diff)\n"
        # end
        # errStr *= "post_unsatisfied = $(post_unsatisfied)\n"
        # if performing_consistency_check
        # errStr *= "world_refs = $(world_refs)\n"
        # errStr *= "new world_refs = $([Ss[node.i_modality][idxs[i_instance + r_start]] for i_instance in 1:_ninstances])\n"
        # end
        # # for i in 1:_ninstances
        # # errStr *= "$(DimensionalDatasets.get_channel(Xs, idxs[i + r_start], feature(node.decision)))\t$(Sfs[node.i_modality][i])\t$(!(post_unsatisfied[i]==1))\t$(Ss[node.i_modality][idxs[i + r_start]])\n";
        # # end
        # println("ERROR! " * errStr)
        # end
        # if length(unique(post_unsatisfied)) == 1
        # # Note: this should always be satisfied, since min_samples_leaf is always > 0 and nl,nr>min_samples_leaf
        # errStr = "An uninformative split was reached."
        # errStr *= "Something's off with this algorithm\n"
        # errStr *= "Purity: $(node.purity)\n"
        # errStr *= "Split: $(decision_str)\n"
        # errStr *= "Unsatisfied flags: $(post_unsatisfied)"
        # println("ERROR! " * errStr)
        # # error(errStr)
        # makeleaf!(node)
        # return false
        # end
        ########################################################################################
        ########################################################################################
        ########################################################################################
        # @show post_unsatisfied
        # @logmsg LogDetail "pre-partition" region idxs[region] post_unsatisfied
        node.split_at = partition!(idxs, post_unsatisfied, 0, region)
        node.purity = corrected_this_purity_times_nt/nt
        # @logmsg LogDetail "post-partition" idxs[region] node.split_at
        ind = node.split_at
        oura = node.onlyallowglobal
        mdepth = node.modaldepth
        leftmodaldepth, rightmodaldepth = begin
            if is_propositional_decision(node.decision)
                mdepth, mdepth
            else
                # The left decision nests in the last right ancestor's formula
                # The right decision
                # NOTE(review): left and right children get the same modal depth here
                # (both `lastrightancestor(node).modaldepth+1`) — confirm intended.
                (lastrightancestor(node).modaldepth+1), (lastrightancestor(node).modaldepth+1)
            end
        end
        # onlyallowglobal changes:
        # on the left node, the modality where the decision was taken
        l_oura = copy(oura)
        l_oura[node.i_modality] = false
        r_oura = oura
        # no need to copy because we will copy at the end
        node.l = typeof(node)(region[ 1:ind], node.depth+1, leftmodaldepth, l_oura)
        node.r = typeof(node)(region[ind+1:end], node.depth+1, rightmodaldepth, r_oura)
        return true
    end
    ########################################################################################
    ########################################################################################
    ########################################################################################
    ########################################################################################
    #################################### Find best split ###################################
    ########################################################################################
    if performing_consistency_check
        unsatisfied = Vector{Bool}(undef, _ninstances)
    end
    # Optimization-tracking variables
    node.purity_times_nt = typemin(P)
    # node.i_modality = -1
    # node.decision = RestrictedDecision(ScalarExistentialFormula{Float64}())
    # node.consistency = nothing
    perform_domain_optimization = is_lookahead_basecase && !performing_consistency_check
    ## Test all decisions for each modality
    for (i_modality, decision_instantiator, test_op, aggr_thresholds) in generate_relevant_decisions(
        Xs,
        Sfs,
        n_subrelations,
        n_subfeatures,
        allow_global_splits,
        node,
        rng,
        max_modal_depth,
        idxs,
        region,
        grouped_featsaggrsnopss,
        grouped_featsnaggrss,
    )
        if isa(_is_classification, Val{true})
            thresh_domain, additional_info = limit_threshold_domain(aggr_thresholds, Yf, Wf, loss_function, test_op, min_samples_leaf, perform_domain_optimization; n_classes = n_classes, nc = nc, nt = nt)
        else
            thresh_domain, additional_info = limit_threshold_domain(aggr_thresholds, Yf, Wf, loss_function, test_op, min_samples_leaf, perform_domain_optimization)
        end
        # Look for the best threshold 'a', as in atoms like "feature >= a"
        for (_threshold, threshold_info) in zip(thresh_domain, additional_info)
            decision = decision_instantiator(_threshold)
            # @show decision
            # @show aggr_thresholds
            # @logmsg LogDetail " Testing decision: $(displaydecision(decision))"
            # println(displaydecision(i_modality, decision))
            # TODO avoid ugly unpacking and figure out a different way of achieving this
            # (test_op, _threshold) = (test_operator(decision), threshold(decision))
            ########################################################################
            # Apply decision to all instances
            ########################################################################
            # Note: unsatisfied is also changed
            if isa(_is_classification, Val{true})
                (ncr, nr, ncl, nl) = begin
                    if !isnothing(threshold_info) && !performing_consistency_check
                        threshold_info
                    else
                        # Re-initialize right counts
                        nr = zero(U)
                        ncr = fill(zero(U), n_classes)
                        if performing_consistency_check
                            unsatisfied .= 1
                        end
                        for i_instance in 1:_ninstances
                            gamma = aggr_thresholds[i_instance]
                            issat = SoleData.apply_test_operator(test_op, gamma, _threshold)
                            # @logmsg LogDetail " instance $i_instance/$_ninstances: (f=$(gamma)) -> issat = $(issat)"
                            # Note: in a fuzzy generalization, `issat` becomes a [0-1] value
                            if !issat
                                nr += Wf[i_instance]
                                ncr[Yf[i_instance]] += Wf[i_instance]
                            else
                                if performing_consistency_check
                                    unsatisfied[i_instance] = 0
                                end
                            end
                        end
                        # ncl = Vector{U}(undef, n_classes)
                        # ncl .= nc .- ncr
                        ncl = nc .- ncr
                        nl = nt - nr
                        threshold_info_new = (ncr, nr, ncl, nl)
                        # if !isnothing(threshold_info) && !performing_consistency_check
                        # if threshold_info != threshold_info_new
                        # @show nc
                        # @show nt
                        # @show Yf
                        # @show Wf
                        # @show test_op
                        # @show _threshold
                        # @show threshold_info
                        # @show threshold_info_new
                        # readline()
                        # end
                        # end
                        threshold_info_new
                    end
                end
            else
                (rsums, nr, lsums, nl, rsum, lsum) = begin
                    # Initialize right counts
                    # rssq = zero(U)
                    rsum = zero(U)
                    nr = zero(U)
                    # TODO experiment with running mean instead, because this may cause a lot of memory inefficiency
                    # https://it.wikipedia.org/wiki/Algoritmi_per_il_calcolo_della_varianza
                    rsums = Float64[] # Vector{U}(undef, _ninstances)
                    lsums = Float64[] # Vector{U}(undef, _ninstances)
                    if performing_consistency_check
                        unsatisfied .= 1
                    end
                    for i_instance in 1:_ninstances
                        gamma = aggr_thresholds[i_instance]
                        issat = SoleData.apply_test_operator(test_op, gamma, _threshold)
                        # @logmsg LogDetail " instance $i_instance/$_ninstances: (f=$(gamma)) -> issat = $(issat)"
                        # TODO make this satisfied a fuzzy value
                        if !issat
                            push!(rsums, sums[i_instance])
                            # rsums[i_instance] = sums[i_instance]
                            nr += Wf[i_instance]
                            rsum += sums[i_instance]
                            # rssq += ssqs[i_instance]
                        else
                            push!(lsums, sums[i_instance])
                            # lsums[i_instance] = sums[i_instance]
                            if performing_consistency_check
                                unsatisfied[i_instance] = 0
                            end
                        end
                    end
                    # Calculate left counts
                    lsum = tsum - rsum
                    # lssq = tssq - rssq
                    nl = nt - nr
                    (rsums, nr, lsums, nl, rsum, lsum)
                end
            end
            ####################################################################################
            ####################################################################################
            ####################################################################################
            # @logmsg LogDebug " (n_left,n_right) = ($nl,$nr)"
            # Honor min_samples_leaf
            # NOTE(review): `nl` is a *weighted* count while `_ninstances` is a raw
            # instance count; confirm this check is intended when W is non-uniform.
            if !(nl >= min_samples_leaf && (_ninstances - nl) >= min_samples_leaf)
                continue
            end
            purity_times_nt = begin
                if isa(_is_classification, Val{true})
                    loss_function((ncl, nl), (ncr, nr))
                else
                    purity = begin
                        if W isa Ones{Int}
                            loss_function(lsums, lsum, nl, rsums, rsum, nr)
                        else
                            error("TODO expand regression code to weigthed version!")
                            loss_function(lsums, ws_l, nl, rsums, ws_r, nr)
                        end
                    end
                    # TODO use loss_function instead
                    # ORIGINAL: TODO understand how it works
                    # purity_times_nt = (rsum * rsum / nr) + (lsum * lsum / nl)
                    # Variance with ssqs
                    # purity_times_nt = (rmean, lmean = rsum/nr, lsum/nl; - (nr * (rssq - 2*rmean*rsum + (rmean^2*nr)) / (nr-1) + (nl * (lssq - 2*lmean*lsum + (lmean^2*nl)) / (nl-1))))
                    # Variance
                    # var = (x)->sum((x.-StatsBase.mean(x)).^2) / (length(x)-1)
                    # purity_times_nt = - (nr * var(rsums)) + nl * var(lsums))
                    # Simil-variance that is easier to compute but it does not work with few samples on the leaves
                    # var = (x)->sum((x.-StatsBase.mean(x)).^2)
                    # purity_times_nt = - (var(rsums) + var(lsums))
                    # println("purity_times_nt: $(purity_times_nt)")
                end
            end::P
            # If don't need to use lookahead, then I adopt the split only if it's better than the current one
            # Otherwise, I adopt it.
            if (
                !(isa(_using_lookahead, Val{false}) || is_lookahead_basecase)
                ||
                (purity_times_nt > node.purity_times_nt) # && !isapprox(purity_times_nt, node.purity_times_nt))
            )
                # DEBUGprintln((ncl,nl,ncr,nr), purity_times_nt)
                node.i_modality = i_modality
                node.purity_times_nt = purity_times_nt
                node.decision = decision
                # print(decision)
                # println(" NEW BEST $node.i_modality, $node.purity_times_nt/nt")
                # @logmsg LogDetail " Found new optimum in modality $(node.i_modality): " (node.purity_times_nt/nt) node.decision
                #################################
                node.consistency = begin
                    if performing_consistency_check
                        unsatisfied[1:_ninstances]
                    else
                        nr
                    end
                end
                # Short-circuit if you don't lookahead, and this is a perfect split
                if (isa(_using_lookahead, Val{false}) || is_lookahead_basecase) && istoploss(loss_function, purity_times_nt)
                    # @show "Threshold shortcircuit!"
                    break
                end
            end
            # In case of lookahead, temporarily accept the split,
            # recurse on my children, and evaluate the purity of the whole subtree
            if (isa(_using_lookahead, Val{true}) && lookahead_depth < lookahead)
                Ss_copy = deepcopy(Ss)
                idxs_copy = deepcopy(idxs) # TODO maybe this reset is not needed?
                is_leaf = splitnode!(node, Ss_copy, idxs_copy)
                if is_leaf
                    # TODO: evaluate the goodneess of the leaf?
                else
                    # node.purity_times_nt
                    # purity_times_nt = loss_function((ncl, nl), (ncr, nr)) ...
                    for childnode in [node.l, node.r]
                        # rng_copy =
                        optimize_node!(
                            childnode,
                            Xs,
                            Y,
                            W,
                            grouped_featsaggrsnopss,
                            grouped_featsnaggrss,
                            lookahead_depth+1,
                            ##########################################################################
                            Ss_copy,
                            ##########################################################################
                            _is_classification,
                            _using_lookahead,
                            _perform_consistency_check
                            ##########################################################################
                            ;
                            loss_function = loss_function,
                            lookahead = lookahead,
                            max_depth = max_depth,
                            min_samples_leaf = min_samples_leaf,
                            min_purity_increase = min_purity_increase,
                            max_purity_at_leaf = max_purity_at_leaf,
                            ##########################################################################
                            max_modal_depth = max_modal_depth,
                            n_subrelations = n_subrelations,
                            n_subfeatures = n_subfeatures,
                            allow_global_splits = allow_global_splits,
                            ##########################################################################
                            idxs = deepcopy(idxs_copy),
                            n_classes = n_classes,
                            rng = copy(rng),
                        )
                    end
                    # TODO: evaluate the goodneess of the subtree?
                end
            end
        end
    end
    # Finally accept the split.
    if (isa(_using_lookahead, Val{false}) || is_lookahead_basecase)
        splitnode!(node, Ss, idxs)
    end
    # println("END split!")
    # readline()
    # node
end
############################################################################################
############################################################################################
############################################################################################
# Core learning routine: builds the tree of `NodeMeta` nodes by recursively optimizing
# splits (multi-threaded via task spawning), and returns the root node together with the
# final permutation of instance indices (so that labels can be re-associated to leaves).
@inline function _fit_tree(
    Xs                        :: MultiLogiset,                        # modal dataset
    Y                         :: AbstractVector{L},                  # label vector
    initconditions            :: AbstractVector{<:InitialCondition}, # world starting conditions
    W                         :: AbstractVector{U}                   # weight vector
    ;
    ##########################################################################
    profile                   :: Symbol,
    ##########################################################################
    lookahead                 :: Union{Nothing,Integer},
    ##########################################################################
    _is_classification        :: Union{Val{true},Val{false}},
    _using_lookahead          :: Union{Val{true},Val{false}},
    _perform_consistency_check:: Union{Val{true},Val{false}},
    ##########################################################################
    rng = Random.GLOBAL_RNG   :: Random.AbstractRNG,
    print_progress            :: Bool = true,
    kwargs...,
) where{L<:_Label,U}
    _ninstances = ninstances(Xs)

    # Select the ModalCART profile; the profile fixes the decision type `D` and
    # overrides `lookahead`. Only :restricted is currently implemented.
    if profile == :restricted
        # Initialize world sets for each instance
        Ss = RestrictedMCARTState.(ModalDecisionTrees.initialworldsets(Xs, initconditions))
        D = RestrictedDecision
        lookahead = 0
    elseif profile == :full
        error("TODO implement.")
        lookahead = 1
    else
        error("Unexpected ModalCART profile: $(profile).")
    end

    # Distribution of the instances indices throughout the tree.
    # It will be recursively permuted, and regions of it assigned to the tree nodes (idxs[node.region])
    idxs = collect(1:_ninstances)

    # Create root node; label type is Int64 (categorical index) for classification,
    # Float64 for regression.
    NodeMetaT = NodeMeta{(isa(_is_classification, Val{true}) ? Int64 : Float64),Float64,D}
    # Modalities initialized "without a world" may only take global splits at first.
    onlyallowglobal = [(initcond == ModalDecisionTrees.start_without_world) for initcond in initconditions]
    root = NodeMetaT(1:_ninstances, 0, 0, onlyallowglobal)

    if print_progress
        # p = ProgressThresh(Inf, 1, "Computing DTree...")
        p = ProgressUnknown("Computing DTree... nodes: ", spinner=true)
    end

    # For each modality, pre-group the scalar metaconditions by feature and, within each
    # feature, by aggregator; these groupings are reused at every split evaluation.
    permodality_groups = [
        begin
            _features = features(X)
            _metaconditions = metaconditions(X)
            _grouped_metaconditions = SoleData.grouped_metaconditions(_metaconditions, _features)
            # _grouped_metaconditions::AbstractVector{<:AbstractVector{Tuple{<:ScalarMetaCondition}}}
            # [[(i_metacond, aggregator, metacondition)...]...]
            groups = [begin
                aggrsnops = Dict{Aggregator,AbstractVector{<:ScalarMetaCondition}}()
                aggregators_with_ids = Tuple{<:Integer,<:Aggregator}[]
                for (i_metacond, aggregator, metacondition) in these_metaconditions
                    if !haskey(aggrsnops, aggregator)
                        aggrsnops[aggregator] = Vector{ScalarMetaCondition}()
                    end
                    push!(aggrsnops[aggregator], metacondition)
                    push!(aggregators_with_ids, (i_metacond,aggregator))
                end
                (aggrsnops, aggregators_with_ids)
            end for (i_feature, (_feature, these_metaconditions)) in enumerate(_grouped_metaconditions)]
            grouped_featsaggrsnops = first.(groups)
            grouped_featsnaggrs = last.(groups)
            # grouped_featsaggrsnops::AbstractVector{<:AbstractDict{<:Aggregator,<:AbstractVector{<:ScalarMetaCondition}}}
            #   [Dict([aggregator => [metacondition...]]...)...]
            # grouped_featsnaggrs::AbstractVector{<:AbstractVector{Tuple{<:Integer,<:Aggregator}}}
            #   [[(i_metacond,aggregator)...]...]
            (grouped_featsaggrsnops, grouped_featsnaggrs)
        end for X in eachmodality(Xs)]

    grouped_featsaggrsnopss = first.(permodality_groups)
    grouped_featsnaggrss = last.(permodality_groups)

    # Process nodes recursively, using multi-threading
    function process_node!(node, rng)
        # Note: better to spawn rng's beforehand, to preserve reproducibility independently from optimize_node!
        rng_l = spawn(rng)
        rng_r = spawn(rng)
        @inbounds optimize_node!(
            node,
            Xs,
            Y,
            W,
            grouped_featsaggrsnopss,
            grouped_featsnaggrss,
            0,
            ################################################################################
            Ss,
            ################################################################################
            _is_classification,
            _using_lookahead,
            _perform_consistency_check
            ################################################################################
            ;
            idxs                = idxs,
            rng                 = rng,
            lookahead           = lookahead,
            kwargs...,
        )
        # !print_progress || ProgressMeter.update!(p, node.purity)
        !print_progress || ProgressMeter.next!(p, spinner="⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏")
        # If a split was found, recur on the two children in parallel tasks.
        if !node.is_leaf
            l = Threads.@spawn process_node!(node.l, rng_l)
            r = Threads.@spawn process_node!(node.r, rng_r)
            wait(l), wait(r)
        end
    end
    @sync Threads.@spawn process_node!(root, rng)

    !print_progress || ProgressMeter.finish!(p)

    return (root, idxs)
end
##############################################################################
##############################################################################
##############################################################################
##############################################################################
"""
    check_input(Xs, Y, initconditions, W; kwargs...)

Validate the arguments of `fit_tree` before learning: dimension agreement between the
dataset, labels, weights and per-modality parameter vectors; hyperparameter ranges; and
absence of `NaN`/`nothing` values in the data, labels and weights. Throws an informative
`ErrorException` on the first violation found; returns `nothing` on success.
"""
@inline function check_input(
    Xs                        :: MultiLogiset,
    Y                         :: AbstractVector{S},
    initconditions            :: Vector{<:InitialCondition},
    W                         :: AbstractVector{U}
    ;
    ##########################################################################
    profile                   :: Symbol,
    ##########################################################################
    loss_function             :: Loss,
    lookahead                 :: Union{Nothing,Integer},
    max_depth                 :: Union{Nothing,Int},
    min_samples_leaf          :: Int,
    min_purity_increase       :: AbstractFloat,
    max_purity_at_leaf        :: AbstractFloat,
    ##########################################################################
    max_modal_depth           :: Union{Nothing,Int},
    n_subrelations            :: Vector{<:Function},
    n_subfeatures             :: Vector{<:Integer},
    allow_global_splits       :: Vector{Bool},
    ##########################################################################
    kwargs...,
) where {S,U}
    _ninstances = ninstances(Xs)

    # Dimension checks: labels/weights against instances, and per-modality vectors
    # against the number of modalities.
    if length(Y) != _ninstances
        error("Dimension mismatch between dataset and label vector Y: ($(_ninstances)) vs $(size(Y))")
    elseif length(W) != _ninstances
        error("Dimension mismatch between dataset and weights vector W: ($(_ninstances)) vs $(size(W))")
    ############################################################################
    elseif length(n_subrelations) != nmodalities(Xs)
        error("Mismatching number of n_subrelations with number of modalities: $(length(n_subrelations)) vs $(nmodalities(Xs))")
    elseif length(n_subfeatures) != nmodalities(Xs)
        error("Mismatching number of n_subfeatures with number of modalities: $(length(n_subfeatures)) vs $(nmodalities(Xs))")
    elseif length(initconditions) != nmodalities(Xs)
        error("Mismatching number of initconditions with number of modalities: $(length(initconditions)) vs $(nmodalities(Xs))")
    elseif length(allow_global_splits) != nmodalities(Xs)
        error("Mismatching number of allow_global_splits with number of modalities: $(length(allow_global_splits)) vs $(nmodalities(Xs))")
    ############################################################################
    # elseif any(nrelations.(eachmodality(Xs)) .< n_subrelations)
    #   error("In at least one modality the total number of relations is less than the number "
    #       * "of relations required at each split\n"
    #       * "# relations:    " * string(nrelations.(eachmodality(Xs))) * "\n\tvs\n"
    #       * "# subrelations: " * string(n_subrelations |> collect))
    # elseif length(findall(n_subrelations .< 0)) > 0
    #   error("Total number of relations $(n_subrelations) must be >= zero ")
    elseif any(nfeatures.(eachmodality(Xs)) .< n_subfeatures)
        error("In at least one modality the total number of features is less than the number "
            * "of features required at each split\n"
            * "# features:    " * string(nfeatures.(eachmodality(Xs))) * "\n\tvs\n"
            * "# subfeatures: " * string(n_subfeatures |> collect))
    elseif any(n_subfeatures .< 0) # (idiomatic form of `length(findall(...)) > 0`)
        error("Total number of features $(n_subfeatures) must be >= zero ")
    elseif min_samples_leaf < 1
        error("Min_samples_leaf must be a positive integer "
            * "(given $(min_samples_leaf))")
    # if loss_function in [entropy]
    #   max_purity_at_leaf_thresh = 0.75 # min_purity_increase 0.01
    #   min_purity_increase_thresh = 0.5
    #   if (max_purity_at_leaf >= max_purity_at_leaf_thresh)
    #       println("Warning! It is advised to use max_purity_at_leaf<$(max_purity_at_leaf_thresh) with loss $(loss_function)"
    #           * "(given $(max_purity_at_leaf))")
    #   elseif (min_purity_increase >= min_purity_increase_thresh)
    #       println("Warning! It is advised to use max_purity_at_leaf<$(min_purity_increase_thresh) with loss $(loss_function)"
    #           * "(given $(min_purity_increase))")
    #   end
    # elseif loss_function in [gini, zero_one] && (max_purity_at_leaf > 1.0 || max_purity_at_leaf <= 0.0)
    #   error("Max_purity_at_leaf for loss $(loss_function) must be in (0,1]"
    #       * "(given $(max_purity_at_leaf))")
    elseif !isnothing(max_depth) && max_depth < 0
        error("Unexpected value for max_depth: $(max_depth) (expected:"
            * " max_depth >= 0, or max_depth = nothing for unbounded depth)")
    elseif !isnothing(max_modal_depth) && max_modal_depth < 0
        error("Unexpected value for max_modal_depth: $(max_modal_depth) (expected:"
            * " max_modal_depth >= 0, or max_modal_depth = nothing for unbounded depth)")
    end

    # Profile and lookahead sanity checks (`lookahead = nothing` is allowed).
    if !(profile in [:restricted, :full])
        error("Unexpected ModalCART profile: $(profile).")
    end

    if !isnothing(lookahead) && !(lookahead >= 0)
        error("Unexpected value for lookahead: $(lookahead) (expected:"
            * " lookahead >= 0)")
    end

    # Data sanity: no NaN in the dataset; no nothing/NaN in labels and weights.
    if SoleData.hasnans(Xs)
        error("This algorithm does not allow NaN values")
    end

    if nothing in Y
        error("This algorithm does not allow nothing values in Y")
    elseif eltype(Y) <: Number && any(isnan.(Y))
        error("This algorithm does not allow NaN values in Y")
    elseif nothing in W
        error("This algorithm does not allow nothing values in W")
    elseif any(isnan.(W))
        error("This algorithm does not allow NaN values in W")
    end
end
############################################################################################
############################################################################################
############################################################################################
################################################################################
"""
    fit_tree(Xs, Y, initconditions, W = default_weights(Y); kwargs...)

Learn a modal decision tree (`DTree`) from multimodal logiset `Xs`, labels `Y`, world
initial conditions `initconditions`, and instance weights `W`. Classification labels are
mapped to a categorical (integer-indexed) form before learning and mapped back when the
final `DTree` is assembled; with `use_minification`, thresholds are minified for faster
learning and back-mapped afterwards.
"""
function fit_tree(
    # modal dataset
    Xs                        :: MultiLogiset,
    # label vector
    Y                         :: AbstractVector{L},
    # world starting conditions
    initconditions            :: Vector{<:InitialCondition},
    # Weights (unary weigths are used if no weight is supplied)
    W                         :: AbstractVector{U} = default_weights(Y)
    # W                       :: AbstractVector{U} = Ones{Int}(ninstances(Xs)), # TODO check whether this is faster
    ;
    # Learning profile (e.g., restricted, full...)
    profile                   :: Symbol = :restricted,
    # Lookahead parameter (i.e., depth of the trees to locally optimize for)
    lookahead                 :: Union{Nothing,Integer} = nothing,
    # Perform minification: transform dataset so that learning happens faster
    use_minification          :: Bool,
    # Debug-only: checks the consistency of the dataset during training
    perform_consistency_check :: Bool,
    kwargs...,
) where {L<:Union{CLabel,RLabel}, U}
    # Check validity of the input
    check_input(Xs, Y, initconditions, W; profile = profile, lookahead = lookahead, kwargs...)

    # Classification-only: transform labels to categorical form (indexed by integers)
    n_classes = begin
        if L<:CLabel
            class_names, Y = get_categorical_form(Y)
            length(class_names)
        else
            0 # dummy value for the case of regression
        end
    end

    Xs, threshold_backmaps = begin
        if use_minification
            minify(Xs)
        else
            Xs, fill(identity, nmodalities(Xs))
        end
    end

    # Call core learning function.
    # BUGFIX: `lookahead` defaults to `nothing` (explicitly allowed by `check_input`),
    # so `Val(lookahead > 0)` used to throw a MethodError; treat `nothing` as 0.
    root, idxs = _fit_tree(Xs, Y, initconditions, W;
        _is_classification         = Val(L<:CLabel),
        _using_lookahead           = Val(!isnothing(lookahead) && lookahead > 0),
        _perform_consistency_check = Val(perform_consistency_check),
        profile                    = profile,
        lookahead                  = lookahead,
        n_classes                  = n_classes,
        kwargs...
    )

    # Finally create the DTree, mapping categorical labels back to class names
    # and thresholds back through the minification maps.
    root = begin
        if L<:CLabel
            _convert(root, map((y)->class_names[y], Y[idxs]), class_names, threshold_backmaps)
        else
            _convert(root, Y[idxs], threshold_backmaps)
        end
    end
    DTree{L}(root, worldtype.(eachmodality(Xs)), initconditions)
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3307 | __precompile__()
"""
ModalDecisionTrees: learning of decision trees, random forests, and root-level
neurosymbolic hybrids over modal (multimodal/dimensional) logisets, built on the
Sole.jl ecosystem (SoleBase, SoleData, SoleLogics, SoleModels).
"""
module ModalDecisionTrees

############################################################################################

import Base: show, length

using FunctionWrappers: FunctionWrapper
using Logging: LogLevel, @logmsg
using Printf
using ProgressMeter
using Random
using Reexport
using StatsBase

using SoleBase
using SoleBase: LogOverview, LogDebug, LogDetail
using SoleBase: spawn, nat_sort
using SoleBase: CLabel, RLabel, Label, _CLabel, _Label, get_categorical_form
using SoleBase: bestguess, default_weights, slice_weights

using SoleData
using SoleData: nvariables,
                get_instance,
                slicedataset

using FillArrays

using SoleData: AbstractModalLogiset
import SoleData: feature, test_operator, threshold

import AbstractTrees: print_tree

# Data structures
@reexport using SoleData.DimensionalDatasets
using SoleData: MultiLogiset
using SoleData: Worlds

using SoleData: nfeatures, nrelations,
                nmodalities, eachmodality, modality,
                displaystructure,
                #
                relations,
                #
                MultiLogiset,
                SupportedLogiset

using SoleData: AbstractWorld, AbstractRelation
using SoleData: AbstractWorlds, Worlds
using SoleData: worldtype

using SoleData: OneWorld

using SoleData: Interval, Interval2D

using SoleData: IARelations, IA2DRelations

using SoleLogics: FullDimensionalFrame
using SoleLogics: normalize

using SoleData: existential_aggregator, universal_aggregator, aggregator_bottom

using SoleModels
import SoleModels: nnodes
import SoleModels: nleaves
import SoleModels: height

############################################################################################

export slicedataset,
       nmodalities, ninstances, nvariables

export DTree,                        # Decision tree
       DForest,                      # Decision forest
       RootLevelNeuroSymbolicHybrid, # Root-level neurosymbolic hybrid model
       #
       nnodes, height, modalheight

############################################################################################

# Type used to index modalities within a MultiLogiset.
ModalityId = Int

# Utility functions
include("utils.jl")

# Loss functions
include("loss-functions.jl")

# Purity helpers
include("purity.jl")

export RestrictedDecision,
       ScalarExistentialFormula,
       displaydecision

# Definitions for Decision Leaf, Internal, Node, Tree & Forest
include("base.jl")
include("print.jl")

# # Default parameter values
include("default-parameters.jl")

# Metrics for assessing the goodness of a decision leaf/rule
include("leaf-metrics.jl")

# One-step decisions
include("interpret-onestep-decisions.jl")

# Build a decision tree/forest from a dataset
include("build.jl")

# Perform post-hoc manipulation/analysis on a decision tree/forest (e.g., pruning)
include("posthoc.jl")

# Apply decision tree/forest to a dataset
include("apply.jl")

export ModalDecisionTree, ModalRandomForest
export depth, wrapdataset

# Interfaces
include("interfaces/Sole/main.jl")
include("interfaces/MLJ.jl")
include("interfaces/AbstractTrees.jl")

# Experimental features
include("experimentals/main.jl")

# Example datasets
include("other/example-datasets.jl")

end # module
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 27660 | using MLJModelInterface: classes
using StatsBase
export apply_tree, apply_forest, apply_model, printapply, tree_walk_metrics
export sprinkle
import SoleModels: apply
############################################################################################
############################################################################################
############################################################################################
# Generic entry point for applying a symbolic model (tree/forest/hybrid) to a dataset;
# methods for each model type are defined below.
function apply end
apply_model = apply
# apply_tree = apply_model
@deprecate apply_tree apply_model
# apply_forest = apply_model
@deprecate apply_forest apply_model

# Generic entry point for computing per-class prediction scores/distributions;
# methods for each model type are defined below.
function apply_proba end
apply_model_proba = apply_proba
# apply_tree_proba = apply_model_proba
@deprecate apply_tree_proba apply_model_proba
# apply_trees_proba = apply_model_proba
@deprecate apply_trees_proba apply_model_proba
# apply_forest_proba = apply_model_proba
@deprecate apply_forest_proba apply_model_proba
############################################################################################
############################################################################################
############################################################################################
# For instance `i_instance`, compute the initial world set of each modality of `Xs`,
# according to the tree's per-modality initial conditions. Returns a Vector{Worlds}
# with one world set per modality.
function mm_instance_initialworldset(Xs, tree::DTree, i_instance::Integer)
    initconds = initconditions(tree)
    return Worlds[
        initialworldset(X, i_instance, initconds[i_mod])
        for (i_mod, X) in enumerate(eachmodality(Xs))
    ]
end
"""
    softmax(v::AbstractVector)
    softmax(m::AbstractMatrix)

Softmax normalization: exponentials normalized to sum to one (column-wise for matrices).

BUGFIX: the maximum is subtracted before exponentiation (log-sum-exp trick), so that
large inputs (e.g., unbounded neural-network scores) no longer overflow `exp` into
`Inf`/`NaN` weights.
"""
function softmax(v::AbstractVector)
    isempty(v) && return exp.(v) # preserve old behavior on empty input
    e = exp.(v .- maximum(v))
    return e ./ sum(e)
end
softmax(m::AbstractMatrix) = mapslices(softmax, m; dims=1)
############################################################################################
############################################################################################
############################################################################################
# Convenience wrapper: printapply to stdout.
printapply(model::SymbolicModel, args...; kwargs...) = printapply(stdout, model, args...; kwargs...)
# printapply_proba(model::SymbolicModel, args...; kwargs...) = printapply_proba(stdout, model, args...; kwargs...)

# Apply `model` to the labeled dataset via `sprinkle` (which also returns a model whose
# leaves carry the supporting labels), print the resulting model to `io`, and return
# (predictions, newmodel).
function printapply(io::IO, model::SymbolicModel, Xs, Y::AbstractVector; kwargs...)
    predictions, newmodel = sprinkle(model, Xs, Y)
    printmodel(io, newmodel; kwargs...)
    predictions, newmodel
end
# function printapply_proba(io::IO, model::SymbolicModel, Xs, Y::AbstractVector; kwargs...)
#     predictions, newmodel = apply_proba(model, Xs, Y TODO)
#     printmodel(io, newmodel; kwargs...)
#     predictions, newmodel
# end
################################################################################
# Apply models: predict labels for a new dataset of instances
################################################################################
# Leaf: the prediction is the label stored in the leaf; (Xs, i_instance, worlds) are unused.
function apply(leaf::DTLeaf, Xs, i_instance::Integer, worlds::AbstractVector{<:AbstractWorlds}; suppress_parity_warning = false)
    prediction(leaf)
end

# Neurosymbolic leaf: the prediction is computed by the leaf's predicting function
# on the single-instance slice of the dataset.
function apply(leaf::NSDTLeaf, Xs, i_instance::Integer, worlds::AbstractVector{<:AbstractWorlds}; suppress_parity_warning = false)
    # if Xs isa AbstractVector
    #     println(length(Xs))
    #     println(typeof(first(Xs)))
    #     println(first(Xs))
    # end
    d = slicedataset(Xs, [i_instance])
    # println(typeof(d))
    # println(hasmethod(length, (typeof(d),)) ? length(d) : nothing)
    # println(hasmethod(size, (typeof(d),)) ? size(d) : nothing)
    preds = leaf.predicting_function(d)
    @assert length(preds) == 1 "Error in apply(::NSDTLeaf, ...) The predicting function returned some malformed output. Expected is a Vector of a single prediction, while the returned value is:\n$(preds)\n$(hasmethod(length, (typeof(preds),)) ? length(preds) : "(length = $(length(preds)))")\n$(hasmethod(size, (typeof(preds),)) ? size(preds) : "(size = $(size(preds)))")"
    # println(preds)
    # println(typeof(preds))
    preds[1]
end

# Internal node: evaluate the node's decision on the instance's current worlds of the
# node's modality (via `modalstep`), update those worlds in place, and recur into the
# left (satisfied) or right (unsatisfied) child.
function apply(tree::DTInternal, Xs, i_instance::Integer, worlds::AbstractVector{<:AbstractWorlds}; kwargs...)
    @logmsg LogDetail "applying branch..."
    @logmsg LogDetail " worlds" worlds
    (satisfied,new_worlds) =
        modalstep(
            modality(Xs, i_modality(tree)),
            i_instance,
            worlds[i_modality(tree)],
            decision(tree),
        )
    worlds[i_modality(tree)] = new_worlds
    @logmsg LogDetail " ->(satisfied,worlds')" satisfied worlds
    apply((satisfied ? left(tree) : right(tree)), Xs, i_instance, worlds; kwargs...)
end
# Obtain predictions of a tree on a dataset
# Obtain predictions of a tree on a dataset: each instance starts from its initial world
# sets and descends the tree; instances are processed in parallel.
function apply(tree::DTree{L}, Xs; print_progress = !(Xs isa MultiLogiset), kwargs...) where {L}
    @logmsg LogDetail "apply..."
    _ninstances = ninstances(Xs)
    predictions = Vector{L}(undef, _ninstances)
    if print_progress
        p = Progress(_ninstances; dt = 1, desc = "Applying tree...")
    end
    Threads.@threads for i_instance in 1:_ninstances
        @logmsg LogDetail " instance $i_instance/$_ninstances"
        # TODO figure out: is it better to interpret the whole dataset at once, or instance-by-instance? The first one enables reusing training code
        worlds = mm_instance_initialworldset(Xs, tree, i_instance)
        predictions[i_instance] = apply(root(tree), Xs, i_instance, worlds; kwargs...)
        print_progress && next!(p)
    end
    predictions
end
# use an array of trees to test features
# Apply an ensemble of trees to a dataset: each tree votes on each instance, and the
# per-instance predictions are aggregated via (weighted) `bestguess`.
# `tree_weights` may be `nothing` (uniform), a vector (one weight per tree), or a
# (ntrees × ninstances) matrix.
function apply(
    trees::AbstractVector{<:DTree{<:L}},
    Xs;
    print_progress = !(Xs isa MultiLogiset),
    suppress_parity_warning = false,
    tree_weights::Union{AbstractMatrix{Z},AbstractVector{Z},Nothing} = nothing,
) where {L<:Label,Z<:Real}
    @logmsg LogDetail "apply..."
    ntrees = length(trees)
    _ninstances = ninstances(Xs)

    # Normalize tree_weights into a (ntrees × ninstances) matrix.
    if !(tree_weights isa AbstractMatrix)
        if isnothing(tree_weights)
            tree_weights = Ones{Int}(length(trees), ninstances(Xs)) # TODO optimize?
        elseif tree_weights isa AbstractVector
            tree_weights = hcat([tree_weights for i_instance in 1:ninstances(Xs)]...)
        else
            @show typeof(tree_weights)
            error("Unexpected tree_weights encountered $(tree_weights).")
        end
    end

    # BUGFIX: these messages previously interpolated undefined variables (`labels`,
    # `weights`), raising an UndefVarError instead of the intended assertion message.
    @assert length(trees) == size(tree_weights, 1) "Each tree must have a corresponding weight: $(length(trees)) trees vs $(size(tree_weights, 1)) tree weights."
    @assert ninstances(Xs) == size(tree_weights, 2) "Each instance must have a corresponding weight: $(ninstances(Xs)) instances vs $(size(tree_weights, 2)) instance weights."

    # Apply each tree to the whole dataset.
    _predictions = Matrix{L}(undef, ntrees, _ninstances)
    if print_progress
        p = Progress(ntrees; dt = 1, desc = "Applying trees...")
    end
    Threads.@threads for i_tree in 1:ntrees
        _predictions[i_tree,:] = apply(trees[i_tree], Xs; print_progress = false, suppress_parity_warning = suppress_parity_warning)
        print_progress && next!(p)
    end

    # For each instance, aggregate the per-tree predictions.
    predictions = Vector{L}(undef, _ninstances)
    Threads.@threads for i_instance in 1:_ninstances
        predictions[i_instance] = bestguess(
            _predictions[:,i_instance],
            tree_weights[:,i_instance];
            suppress_parity_warning = suppress_parity_warning
        )
    end

    predictions
end
# use a proper forest to test features
# Apply a forest to a dataset. `weight_trees_by` selects the vote-weighting scheme:
# `false` for unweighted voting, or a vector of per-tree weights.
function apply(
    forest::DForest,
    Xs;
    suppress_parity_warning = false,
    weight_trees_by::Union{Bool,Symbol,AbstractVector} = false,
)
    forest_trees = trees(forest)
    if weight_trees_by isa AbstractVector
        return apply(forest_trees, Xs;
            suppress_parity_warning = suppress_parity_warning,
            tree_weights = weight_trees_by)
    elseif weight_trees_by == false
        return apply(forest_trees, Xs;
            suppress_parity_warning = suppress_parity_warning)
    # elseif weight_trees_by == :accuracy
    #     # TODO: choose HOW to weight a tree... overall_accuracy is just an example (maybe can be parameterized)
    #     apply(forest.trees, Xs; tree_weights = map(cm -> overall_accuracy(cm), get(forest.metrics, :oob_metrics...)))
    else
        error("Unexpected value for weight_trees_by: $(weight_trees_by)")
    end
end
# Root-level neurosymbolic hybrid: the neural `feature_function` scores each tree for the
# given dataset, and the softmax-normalized scores weigh the trees' votes.
function apply(
    nsdt::RootLevelNeuroSymbolicHybrid,
    Xs;
    suppress_parity_warning = false,
)
    tree_weights = softmax(nsdt.feature_function(Xs))
    return apply(
        nsdt.trees,
        Xs;
        tree_weights = tree_weights,
        suppress_parity_warning = suppress_parity_warning,
    )
end
################################################################################
# Sprinkle: distribute dataset instances throughout a tree
################################################################################
# Recursively rebuild a model with all leaf-level supporting labels/predictions emptied,
# so that `sprinkle` can repopulate them from scratch.

# Leaf: keep the prediction, drop the supporting labels.
function _empty_tree_leaves(leaf::DTLeaf{L}) where {L}
    DTLeaf{L}(prediction(leaf), L[])
end

# Neurosymbolic leaf: keep the predicting function and validation data, drop the
# training-side supporting labels/predictions.
function _empty_tree_leaves(leaf::NSDTLeaf{L}) where {L}
    NSDTLeaf{L}(leaf.predicting_function, L[], leaf.supp_valid_labels, L[], leaf.supp_valid_predictions)
end

# Internal node: recur on the `this` leaf and on both children.
function _empty_tree_leaves(node::DTInternal)
    return DTInternal(
        i_modality(node),
        decision(node),
        _empty_tree_leaves(this(node)),
        _empty_tree_leaves(left(node)),
        _empty_tree_leaves(right(node)),
    )
end

# Tree: recur from the root, preserving world types and initial conditions.
function _empty_tree_leaves(tree::DTree)
    return DTree(
        _empty_tree_leaves(root(tree)),
        worldtypes(tree),
        initconditions(tree),
    )
end
# Sprinkle a label into a leaf: append `y` to the leaf's supporting labels and return
# (prediction, new leaf). With `update_labels = true`, the leaf's prediction is
# recomputed from the updated supporting labels.
function sprinkle(
    leaf::DTLeaf{L},
    Xs,
    i_instance::Integer,
    worlds::AbstractVector{<:AbstractWorlds},
    y::L;
    update_labels = false,
    suppress_parity_warning = false,
) where {L<:Label}
    _supp_labels = L[supp_labels(leaf)..., y]

    _prediction = begin
        if update_labels
            # BUGFIX: recompute the prediction from the *updated* supporting labels;
            # previously `supp_labels(leaf)` was used, which excluded the new `y`.
            bestguess(_supp_labels, suppress_parity_warning = suppress_parity_warning)
        else
            prediction(leaf)
        end
    end

    _prediction, DTLeaf(_prediction, _supp_labels)
end
# Sprinkle a label into a neurosymbolic leaf: compute the leaf's prediction for the
# instance, and append (y, prediction) to the training-side supporting vectors.
# `update_labels = true` (retraining the predicting function) is not implemented yet.
function sprinkle(
    leaf::NSDTLeaf{L},
    Xs,
    i_instance::Integer,
    worlds::AbstractVector{<:AbstractWorlds},
    y::L;
    update_labels = false,
    suppress_parity_warning = false,
) where {L<:Label}
    pred = apply(leaf, Xs, i_instance, worlds; suppress_parity_warning = suppress_parity_warning)
    _supp_train_labels      = L[leaf.supp_train_labels...,      y]
    _supp_train_predictions = L[leaf.supp_train_predictions..., pred]
    _predicting_function = begin
        if update_labels
            error("TODO expand code retrain")
        else
            leaf.predicting_function
        end
    end
    pred, NSDTLeaf{L}(_predicting_function, _supp_train_labels, leaf.supp_valid_labels, _supp_train_predictions, leaf.supp_valid_predictions)
end
# Sprinkle a label down an internal node: evaluate the node's decision (updating the
# instance's worlds), sprinkle into the `this` leaf, then into the satisfied child only;
# returns (prediction, rebuilt node).
function sprinkle(
    tree::DTInternal{L},
    Xs,
    i_instance::Integer,
    worlds::AbstractVector{<:AbstractWorlds},
    y::L;
    kwargs...,
) where {L}
    (satisfied,new_worlds) =
        modalstep(
            modality(Xs, i_modality(tree)),
            i_instance,
            worlds[i_modality(tree)],
            decision(tree)
        )
    # if satisfied
    #     println("new_worlds: $(new_worlds)")
    # end
    worlds[i_modality(tree)] = new_worlds

    this_prediction, this_leaf = sprinkle(this(tree), Xs, i_instance, worlds, y; kwargs...) # TODO test whether this works correctly

    # The label descends only into the branch the instance actually takes; the other
    # child is kept unchanged.
    pred, left_leaf, right_leaf = begin
        if satisfied
            pred, left_leaf = sprinkle(left(tree), Xs, i_instance, worlds, y; kwargs...)
            pred, left_leaf, right(tree)
        else
            pred, right_leaf = sprinkle(right(tree), Xs, i_instance, worlds, y; kwargs...)
            pred, left(tree), right_leaf
        end
    end

    pred, DTInternal(i_modality(tree), decision(tree), this_leaf, left_leaf, right_leaf)
end
# Sprinkle a labeled dataset down a tree: propagate each (instance, label) pair from the
# root, accumulating labels into the leaves (optionally after resetting them), and return
# (predictions, rebuilt tree).
function sprinkle(
    tree::DTree{L},
    Xs,
    Y::AbstractVector{<:L};
    print_progress = !(Xs isa MultiLogiset),
    reset_leaves = true,
    kwargs...,
) where {L}
    # Reset
    tree = (reset_leaves ? _empty_tree_leaves(tree) : tree)

    predictions = Vector{L}(undef, ninstances(Xs))
    _root = root(tree)

    # Propagate instances down the tree
    if print_progress
        p = Progress(ninstances(Xs); dt = 1, desc = "Applying tree...")
    end
    # Note: no multi-threading (the rebuilt root is threaded through the iterations).
    for i_instance in 1:ninstances(Xs)
        worlds = mm_instance_initialworldset(Xs, tree, i_instance)
        pred, _root = sprinkle(_root, Xs, i_instance, worlds, Y[i_instance]; kwargs...)
        predictions[i_instance] = pred
        print_progress && next!(p)
    end
    predictions, DTree(_root, worldtypes(tree), initconditions(tree))
end
# use an array of trees to test features
# Sprinkle a labeled dataset through an ensemble of trees: each (deep-copied) tree is
# sprinkled independently, and the per-instance predictions are aggregated via
# (weighted) `bestguess`. Returns (predictions, sprinkled trees).
function sprinkle(
    trees::AbstractVector{<:DTree{<:L}},
    Xs,
    Y::AbstractVector{<:L};
    print_progress = !(Xs isa MultiLogiset),
    tree_weights::Union{AbstractMatrix{Z},AbstractVector{Z},Nothing} = nothing,
    suppress_parity_warning = false,
) where {L<:Label,Z<:Real}
    @logmsg LogDetail "sprinkle..."
    trees = deepcopy(trees)
    ntrees = length(trees)
    _ninstances = ninstances(Xs)

    # Normalize tree_weights into a (ntrees × ninstances) matrix.
    if !(tree_weights isa AbstractMatrix)
        if isnothing(tree_weights)
            tree_weights = Ones{Int}(length(trees), ninstances(Xs)) # TODO optimize?
        elseif tree_weights isa AbstractVector
            tree_weights = hcat([tree_weights for i_instance in 1:ninstances(Xs)]...)
        else
            @show typeof(tree_weights)
            error("Unexpected tree_weights encountered $(tree_weights).")
        end
    end

    # BUGFIX: these messages previously interpolated undefined variables (`labels`,
    # `weights`), raising an UndefVarError instead of the intended assertion message.
    @assert length(trees) == size(tree_weights, 1) "Each tree must have a corresponding weight: $(length(trees)) trees vs $(size(tree_weights, 1)) tree weights."
    @assert ninstances(Xs) == size(tree_weights, 2) "Each instance must have a corresponding weight: $(ninstances(Xs)) instances vs $(size(tree_weights, 2)) instance weights."

    # Sprinkle each tree on the whole dataset.
    _predictions = Matrix{L}(undef, ntrees, _ninstances)
    if print_progress
        p = Progress(ntrees; dt = 1, desc = "Applying trees...")
    end
    Threads.@threads for i_tree in 1:ntrees
        _predictions[i_tree,:], trees[i_tree] = sprinkle(trees[i_tree], Xs, Y; print_progress = false)
        print_progress && next!(p)
    end

    # For each instance, aggregate the per-tree predictions.
    predictions = Vector{L}(undef, _ninstances)
    Threads.@threads for i_instance in 1:_ninstances
        predictions[i_instance] = bestguess(
            _predictions[:,i_instance],
            tree_weights[:,i_instance];
            suppress_parity_warning = suppress_parity_warning
        )
    end

    predictions, trees
end
# use a proper forest to test features
# Sprinkle a labeled dataset through a forest, dispatching on the vote-weighting scheme,
# and rebuild the forest with the sprinkled trees.
function sprinkle(
    forest::DForest,
    Xs,
    Y::AbstractVector{<:L};
    weight_trees_by::Union{Bool,Symbol,AbstractVector} = false,
    kwargs...
) where {L<:Label}
    # BUGFIX: the destructured local was named `trees`, which (by Julia's scoping rules)
    # shadowed the `trees(forest)` accessor throughout the function body and caused an
    # UndefVarError on every call.
    predictions, _trees = begin
        if weight_trees_by == false
            sprinkle(trees(forest), Xs, Y; kwargs...)
        elseif isa(weight_trees_by, AbstractVector)
            sprinkle(trees(forest), Xs, Y; tree_weights = weight_trees_by, kwargs...)
        # elseif weight_trees_by == :accuracy
        #     # TODO: choose HOW to weight a tree... overall_accuracy is just an example (maybe can be parameterized)
        #     sprinkle(forest.trees, Xs; tree_weights = map(cm -> overall_accuracy(cm), get(forest.metrics, :oob_metrics...)))
        else
            error("Unexpected value for weight_trees_by: $(weight_trees_by)")
        end
    end
    predictions, DForest{L}(_trees, (;)) # TODO note that the original metrics are lost here
end
# Sprinkle a labeled dataset through a root-level neurosymbolic hybrid: the neural
# `feature_function` provides (softmax-normalized) per-tree weights, and the hybrid is
# rebuilt with the sprinkled trees.
function sprinkle(
    nsdt::RootLevelNeuroSymbolicHybrid,
    Xs,
    Y::AbstractVector{<:L};
    suppress_parity_warning = false,
    kwargs...
) where {L<:Label}
    W = softmax(nsdt.feature_function(Xs))
    predictions, trees = sprinkle(
        nsdt.trees,
        Xs,
        Y;
        suppress_parity_warning = suppress_parity_warning,
        tree_weights = W,
        kwargs...,
    )
    predictions, RootLevelNeuroSymbolicHybrid(nsdt.feature_function, trees, (;)) # TODO note that the original metrics are lost here
end
############################################################################################
# using Distributions
using Distributions: fit, Normal
using CategoricalDistributions
using CategoricalDistributions: UnivariateFinite
using CategoricalArrays
# Leaf: the "scores" are the leaf's supporting labels (to be aggregated by the caller).
function apply_proba(leaf::DTLeaf, Xs, i_instance::Integer, worlds::AbstractVector{<:AbstractWorlds})
    supp_labels(leaf)
end

# Internal node: evaluate the node's decision on the instance's current worlds (via
# `modalstep`), update them, and recur into the left (satisfied) or right child.
function apply_proba(tree::DTInternal, Xs, i_instance::Integer, worlds::AbstractVector{<:AbstractWorlds})
    @logmsg LogDetail "applying branch..."
    @logmsg LogDetail " worlds" worlds
    (satisfied,new_worlds) =
        modalstep(
            modality(Xs, i_modality(tree)),
            i_instance,
            worlds[i_modality(tree)],
            decision(tree),
        )
    worlds[i_modality(tree)] = new_worlds
    @logmsg LogDetail " ->(satisfied,worlds')" satisfied worlds
    apply_proba((satisfied ? left(tree) : right(tree)), Xs, i_instance, worlds)
end
# Obtain predictions of a tree on a dataset
# Classification: per-instance class scores, computed as the relative frequencies of the
# supporting labels in the leaf the instance lands in. Returns a (ninstances × nclasses)
# score matrix when `return_scores`, otherwise a `UnivariateFinite` distribution object.
function apply_proba(tree::DTree{L}, Xs, _classes; return_scores = false, suppress_parity_warning = false) where {L<:CLabel}
    @logmsg LogDetail "apply_proba..."
    _classes = string.(_classes)
    _ninstances = ninstances(Xs)
    prediction_scores = Matrix{Float64}(undef, _ninstances, length(_classes))
    for i_instance in 1:_ninstances
        @logmsg LogDetail " instance $i_instance/$_ninstances"
        # TODO figure out: is it better to interpret the whole dataset at once, or instance-by-instance? The first one enables reusing training code
        worlds = mm_instance_initialworldset(Xs, tree, i_instance)
        this_prediction_scores = apply_proba(root(tree), Xs, i_instance, worlds)
        # d = fit(UnivariateFinite, categorical(this_prediction_scores; levels = _classes))
        # Build a categorical distribution from the leaf's supporting-label counts.
        d = begin
            c = categorical(collect(this_prediction_scores); levels = _classes)
            cc = countmap(c)
            s = [get(cc, cl, 0) for cl in classes(c)]
            UnivariateFinite(classes(c), s ./ sum(s))
        end
        prediction_scores[i_instance, :] .= [pdf(d, c) for c in _classes]
    end
    if return_scores
        prediction_scores
    else
        UnivariateFinite(_classes, prediction_scores, pool=missing)
    end
end
# Obtain predictions of a tree on a dataset
# Regression: per-instance score vectors (the supporting labels of the reached leaf).
# Returns the raw score vectors when `return_scores`, otherwise a fitted Normal
# distribution per instance.
function apply_proba(tree::DTree{L}, Xs, _classes = nothing; return_scores = false, suppress_parity_warning = false) where {L<:RLabel}
    @logmsg LogDetail "apply_proba..."
    _ninstances = ninstances(Xs)
    prediction_scores = Vector{Vector{Float64}}(undef, _ninstances)
    for i_instance in 1:_ninstances
        @logmsg LogDetail " instance $i_instance/$_ninstances"
        # TODO figure out: is it better to interpret the whole dataset at once, or instance-by-instance? The first one enables reusing training code
        worlds = mm_instance_initialworldset(Xs, tree, i_instance)
        prediction_scores[i_instance] = apply_proba(tree.root, Xs, i_instance, worlds)
    end
    if return_scores
        prediction_scores
    else
        [fit(Normal, sc) for sc in prediction_scores]
    end
end
# use an array of trees to test features
"""
    apply_proba(trees::AbstractVector{<:DTree}, Xs, _classes; tree_weights = nothing, suppress_parity_warning = false)

Classification ensemble version: apply each tree in `trees` to the whole
dataset `Xs` and aggregate the per-tree class scores into one
`UnivariateFinite` distribution per instance (unweighted case), or into a
score matrix (weighted case; currently broken, see TODO below).

`tree_weights` may be `nothing` (uniform), a per-tree vector (broadcast to all
instances), or a ntrees×ninstances matrix.
"""
function apply_proba(
    trees::AbstractVector{<:DTree{<:L}},
    Xs,
    _classes;
    tree_weights::Union{AbstractMatrix{Z},AbstractVector{Z},Nothing} = nothing,
    suppress_parity_warning = false
) where {L<:CLabel,Z<:Real}
    @logmsg LogDetail "apply_proba..."
    _classes = string.(_classes)
    ntrees = length(trees)
    _ninstances = ninstances(Xs)
    # Normalize `tree_weights` to either `nothing` or a ntrees×ninstances matrix.
    if !(tree_weights isa AbstractMatrix)
        if isnothing(tree_weights)
            tree_weights = nothing # Ones{Int}(length(trees), ninstances(Xs)) # TODO optimize?
        elseif tree_weights isa AbstractVector
            # Broadcast a per-tree weight vector: one identical column per instance.
            tree_weights = hcat([tree_weights for i_instance in 1:ninstances(Xs)]...)
        else
            @show typeof(tree_weights)
            error("Unexpected tree_weights encountered $(tree_weights).")
        end
    end
    # NOTE(fix): the previous assertion messages interpolated undefined variables
    # (`labels`, `weights`), which turned assertion failures into `UndefVarError`s.
    @assert isnothing(tree_weights) || length(trees) == size(tree_weights, 1) "Each tree must have a corresponding weight: got $(length(trees)) trees and $(size(tree_weights, 1)) weight rows."
    @assert isnothing(tree_weights) || ninstances(Xs) == size(tree_weights, 2) "Each instance must have a corresponding weight: got $(ninstances(Xs)) instances and $(size(tree_weights, 2)) weight columns."
    # apply each tree to the whole dataset
    _predictions = Array{Float64,3}(undef, _ninstances, ntrees, length(_classes))
    Threads.@threads for i_tree in 1:ntrees
        _predictions[:,i_tree,:] = apply_proba(trees[i_tree], Xs, _classes; return_scores = true)
    end
    # Average the prediction scores
    if isnothing(tree_weights)
        # For each (instance, tree), pick the highest-scoring class...
        bestguesses_idx = mapslices(argmax, _predictions; dims=3)
        bestguesses = dropdims(map(idx->_classes[idx], bestguesses_idx); dims=3)
        # ...then, per instance, turn the trees' votes into relative frequencies.
        ret = map(this_prediction_scores->begin
            c = categorical(this_prediction_scores; levels = _classes)
            cc = countmap(c)
            s = [get(cc, cl, 0) for cl in classes(c)]
            UnivariateFinite(classes(c), s ./ sum(s))
        end, eachslice(bestguesses; dims=1))
        # ret = map(s->bestguess(s; suppress_parity_warning = suppress_parity_warning), eachslice(bestguesses; dims=1))
        ret
    else
        # TODO fix this, it errors.
        tree_weights = tree_weights./sum(tree_weights)
        prediction_scores = Matrix{Float64}(undef, _ninstances, length(_classes))
        Threads.@threads for i in 1:_ninstances
            prediction_scores[i,:] .= mean(_predictions[i,:,:] * tree_weights; dims=1)
        end
        prediction_scores
    end
end
# use an array of trees to test features
"""
    apply_proba(trees::AbstractVector{<:DTree}, Xs, classes = nothing; tree_weights = nothing, kwargs...)

Regression ensemble version: apply each tree in `trees` to the whole dataset
`Xs` and, for each instance, fit a `Normal` distribution over the pooled
supporting prediction scores of all trees.

Non-`nothing` `tree_weights` is not implemented yet and raises an error.
`classes` is accepted for interface uniformity (unused here).
"""
function apply_proba(
    trees::AbstractVector{<:DTree{<:L}},
    Xs,
    classes = nothing;
    tree_weights::Union{Nothing,AbstractVector{Z}} = nothing,
    kwargs...
) where {L<:RLabel,Z<:Real}
    @logmsg LogDetail "apply_proba..."
    ntrees = length(trees)
    _ninstances = ninstances(Xs)
    if !isnothing(tree_weights)
        # NOTE(fix): the previous message interpolated undefined variables
        # (`labels`, `weights`), turning an assertion failure into an
        # `UndefVarError`; also, `==` (not `===`) is the idiomatic comparison.
        @assert length(trees) == length(tree_weights) "Each tree must have a corresponding weight: got $(length(trees)) trees and $(length(tree_weights)) weights."
    end
    # apply each tree to the whole dataset
    prediction_scores = Matrix{Vector{Float64}}(undef, _ninstances, ntrees)
    # Threads.@threads for i_tree in 1:ntrees
    for i_tree in 1:ntrees
        prediction_scores[:,i_tree] = apply_proba(trees[i_tree], Xs; return_scores = true, kwargs...)
    end
    # Average the prediction scores
    if isnothing(tree_weights)
        # Pool each instance's score vectors across trees and fit a Normal.
        [fit(Normal, vcat(sc...)) for sc in eachrow(prediction_scores)]
    else
        error("TODO expand code")
    end
end
# use a proper forest to test features
# Forest-level entry point: unpack the forest's trees and forward to the
# ensemble methods above. `weight_trees_by` selects the weighting scheme:
# `false` (uniform), or an explicit per-tree weight vector; the `:accuracy`
# option is sketched below but currently disabled.
function apply_proba(
forest::DForest{L},
Xs,
args...;
weight_trees_by::Union{Bool,Symbol,AbstractVector} = false,
kwargs...
) where {L<:Label}
if weight_trees_by == false
apply_proba(trees(forest), Xs, args...; kwargs...)
elseif isa(weight_trees_by, AbstractVector)
apply_proba(trees(forest), Xs, args...; tree_weights = weight_trees_by, kwargs...)
# elseif weight_trees_by == :accuracy
# # TODO: choose HOW to weight a tree... overall_accuracy is just an example (maybe can be parameterized)
# apply_proba(forest.trees, Xs, args...; tree_weights = map(cm -> overall_accuracy(cm), get(forest.metrics, :oob_metrics...)))
else
error("Unexpected value for weight_trees_by: $(weight_trees_by)")
end
end
############################################################################################
# function tree_walk_metrics(leaf::DTLeaf; n_tot_inst = nothing, best_rule_params = [])
# if isnothing(n_tot_inst)
# n_tot_inst = ninstances(leaf)
# end
# matches = findall(leaf.supp_labels .== predictions(leaf))
# n_correct = length(matches)
# n_inst = length(leaf.supp_labels)
# metrics = Dict()
# confidence = n_correct/n_inst
# metrics["_ninstances"] = n_inst
# metrics["n_correct"] = n_correct
# metrics["avg_confidence"] = confidence
# metrics["best_confidence"] = confidence
# if !isnothing(n_tot_inst)
# support = n_inst/n_tot_inst
# metrics["avg_support"] = support
# metrics["support"] = support
# metrics["best_support"] = support
# for best_rule_p in best_rule_params
# if (haskey(best_rule_p, :min_confidence) && best_rule_p.min_confidence > metrics["best_confidence"]) ||
# (haskey(best_rule_p, :min_support) && best_rule_p.min_support > metrics["best_support"])
# metrics["best_rule_t=$(best_rule_p)"] = -Inf
# else
# metrics["best_rule_t=$(best_rule_p)"] = metrics["best_confidence"] * best_rule_p.t + metrics["best_support"] * (1-best_rule_p.t)
# end
# end
# end
# metrics
# end
# function tree_walk_metrics(tree::DTInternal; n_tot_inst = nothing, best_rule_params = [])
# if isnothing(n_tot_inst)
# n_tot_inst = ninstances(tree)
# end
# # TODO visit also tree.this
# metrics_l = tree_walk_metrics(tree.left; n_tot_inst = n_tot_inst, best_rule_params = best_rule_params)
# metrics_r = tree_walk_metrics(tree.right; n_tot_inst = n_tot_inst, best_rule_params = best_rule_params)
# metrics = Dict()
# # Number of instances passing through the node
# metrics["_ninstances"] =
# metrics_l["_ninstances"] + metrics_r["_ninstances"]
# # Number of correct instances passing through the node
# metrics["n_correct"] =
# metrics_l["n_correct"] + metrics_r["n_correct"]
# # Average confidence of the subtree
# metrics["avg_confidence"] =
# (metrics_l["_ninstances"] * metrics_l["avg_confidence"] +
# metrics_r["_ninstances"] * metrics_r["avg_confidence"]) /
# (metrics_l["_ninstances"] + metrics_r["_ninstances"])
# # Average support of the subtree (Note to self: weird...?)
# metrics["avg_support"] =
# (metrics_l["_ninstances"] * metrics_l["avg_support"] +
# metrics_r["_ninstances"] * metrics_r["avg_support"]) /
# (metrics_l["_ninstances"] + metrics_r["_ninstances"])
# # Best confidence of the best-confidence path passing through the node
# metrics["best_confidence"] = max(metrics_l["best_confidence"], metrics_r["best_confidence"])
# # Support of the current node
# if !isnothing(n_tot_inst)
# metrics["support"] = (metrics_l["_ninstances"] + metrics_r["_ninstances"])/n_tot_inst
# # Best support of the best-support path passing through the node
# metrics["best_support"] = max(metrics_l["best_support"], metrics_r["best_support"])
# # Best rule (confidence and support) passing through the node
# for best_rule_p in best_rule_params
# metrics["best_rule_t=$(best_rule_p)"] = max(metrics_l["best_rule_t=$(best_rule_p)"], metrics_r["best_rule_t=$(best_rule_p)"])
# end
# end
# metrics
# end
# tree_walk_metrics(tree::DTree; kwargs...) = tree_walk_metrics(tree.root; kwargs...)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 27243 | using SoleData: AbstractModalLogiset
import SoleModels: printmodel, displaymodel
import SoleModels: ninstances, height, nnodes
############################################################################################
# Initial conditions
############################################################################################
using SoleLogics
using SoleLogics: AbstractMultiModalFrame
using SoleLogics: AbstractSyntaxStructure
# Initial conditions determine the world(s) from which formulas start being
# interpreted on each instance's frame (see `initialworldset` below).
abstract type InitialCondition end
# Start from the empty world (no assumed world).
struct StartWithoutWorld <: InitialCondition end;
const start_without_world = StartWithoutWorld();
# Start from the central world of the frame.
struct StartAtCenter <: InitialCondition end;
const start_at_center = StartAtCenter();
# Start from a user-specified world `w`.
struct StartAtWorld{W<:AbstractWorld} <: InitialCondition w::W end;
# Map each initial condition to its initial world set on frame `fr`.
function initialworldset(fr::AbstractMultiModalFrame{W}, initcond::StartWithoutWorld) where {W<:AbstractWorld}
Worlds{W}([SoleLogics.emptyworld(fr)])
end
function initialworldset(fr::AbstractMultiModalFrame{W}, initcond::StartAtCenter) where {W<:AbstractWorld}
Worlds{W}([SoleLogics.centralworld(fr)])
end
function initialworldset(::AbstractMultiModalFrame{W}, initcond::StartAtWorld{W}) where {W<:AbstractWorld}
Worlds{W}([initcond.w])
end
# `anchor` wraps a formula so that its evaluation starts according to the
# given initial condition (identity, or a diamond relational connective that
# jumps to the center world / to a specific world).
anchor(φ::AbstractSyntaxStructure, ::StartWithoutWorld) = φ
anchor(φ::AbstractSyntaxStructure, ::StartAtCenter) = DiamondRelationalConnective(SoleLogics.tocenterrel)(φ)
anchor(φ::AbstractSyntaxStructure, cm::StartAtWorld) = DiamondRelationalConnective(SoleLogics.AtWorldRelation(cm.w))(φ)
# Convenience overload: derive the frame of instance `i_instance` of dataset
# `X`, then dispatch on the initial-condition methods above.
function initialworldset(
X,
i_instance::Integer,
args...
)
initialworldset(frame(X, i_instance), args...)
end
# Compute, for each modality of `Xs`, the initial world set of every instance
# (one initial condition per modality). Returns a vector indexed by modality.
function initialworldsets(Xs::MultiLogiset, initconds::AbstractVector{<:InitialCondition})
Ss = Vector{Vector{WST} where {W,WST<:Worlds{W}}}(undef, nmodalities(Xs)) # Fix
for (i_modality,X) in enumerate(eachmodality(Xs))
W = worldtype(X)
Ss[i_modality] = Worlds{W}[initialworldset(X, i_instance, initconds[i_modality]) for i_instance in 1:ninstances(Xs)]
# Ss[i_modality] = Worlds{W}[[Interval(1,2)] for i_instance in 1:ninstances(Xs)]
end
Ss
end
############################################################################################
"""
A decision is an object that is placed at an internal decision node,
and influences on how the instances are routed to its left or right child.
"""
abstract type AbstractDecision end
"""
Abstract type for nodes in a decision tree.
"""
abstract type AbstractNode{L<:Label} end
predictiontype(::AbstractNode{L}) where {L} = L
"""
Abstract type for leaves in a decision tree.
"""
abstract type AbstractDecisionLeaf{L<:Label} <: AbstractNode{L} end
"""
Abstract type for internal decision nodes of a decision tree.
"""
abstract type AbstractDecisionInternal{L<:Label,D<:AbstractDecision} <: AbstractNode{L} end
"""
Union type for internal and decision nodes of a decision tree.
"""
const DTNode{L<:Label,D<:AbstractDecision} = Union{<:AbstractDecisionLeaf{<:L},<:AbstractDecisionInternal{L,D}}
isleftchild(node::DTNode, parent::AbstractDecisionInternal) = (left(parent) == node)
isrightchild(node::DTNode, parent::AbstractDecisionInternal) = (right(parent) == node)
isinleftsubtree(node::DTNode, parent::AbstractDecisionInternal) = isleftchild(node, parent) || isinsubtree(node, left(parent))
isinrightsubtree(node::DTNode, parent::AbstractDecisionInternal) = isrightchild(node, parent) || isinsubtree(node, right(parent))
isinsubtree(node::DTNode, parent::DTNode) = (node == parent) || (isinleftsubtree(node, parent) || isinrightsubtree(node, parent))
isleftchild(node::DTNode, parent::AbstractDecisionLeaf) = false
isrightchild(node::DTNode, parent::AbstractDecisionLeaf) = false
isinleftsubtree(node::DTNode, parent::AbstractDecisionLeaf) = false
isinrightsubtree(node::DTNode, parent::AbstractDecisionLeaf) = false
############################################################################################
############################################################################################
############################################################################################
include("decisions.jl")
############################################################################################
############################################################################################
############################################################################################
# Decision leaf node, holding an output (prediction)
# Decision leaf node, holding an output (prediction)
struct DTLeaf{L<:Label} <: AbstractDecisionLeaf{L}
# prediction
prediction :: L
# supporting (e.g., training) instances labels
supp_labels :: Vector{L}
# create leaf
DTLeaf{L}(prediction, supp_labels::AbstractVector) where {L<:Label} = new{L}(prediction, supp_labels)
DTLeaf(prediction::L, supp_labels::AbstractVector) where {L<:Label} = DTLeaf{L}(prediction, supp_labels)
# create leaf without supporting labels
DTLeaf{L}(prediction) where {L<:Label} = DTLeaf{L}(prediction, L[])
DTLeaf(prediction::L) where {L<:Label} = DTLeaf{L}(prediction, L[])
# create leaf from supporting labels (prediction derived via `bestguess`)
DTLeaf{L}(supp_labels::AbstractVector) where {L<:Label} = DTLeaf{L}(bestguess(L.(supp_labels)), supp_labels)
function DTLeaf(supp_labels::AbstractVector)
prediction = bestguess(supp_labels)
DTLeaf(prediction, supp_labels)
end
end
# The label predicted by the leaf.
prediction(leaf::DTLeaf) = leaf.prediction
# Supporting labels of the leaf. `DTLeaf` only stores training labels, hence
# `train_or_valid` must be `true` (cf. `NSDTLeaf`, which also stores validation labels).
function supp_labels(leaf::DTLeaf; train_or_valid = true)
@assert train_or_valid == true
leaf.supp_labels
end
# The leaf's prediction, replicated once per supporting instance.
function predictions(leaf::DTLeaf; train_or_valid = true)
@assert train_or_valid == true
fill(prediction(leaf), length(supp_labels(leaf; train_or_valid = train_or_valid)))
end
############################################################################################
# DATASET_TYPE = MultiLogiset
# Type of the datasets a `PredictingFunction` accepts (kept as `Any` until the
# `MultiLogiset` restriction can be restored; see TODOs below).
DATASET_TYPE = Any
# Callable wrapper around a prediction function mapping a dataset to a vector
# of labels of type `L`. `FunctionWrapper` pins the call signature.
struct PredictingFunction{L<:Label}
# f::FunctionWrapper{Vector{L},Tuple{DATASET_TYPE}} # TODO restore!!!
f::FunctionWrapper{Any,Tuple{DATASET_TYPE}}
function PredictingFunction{L}(f::Any) where {L<:Label}
# new{L}(FunctionWrapper{Vector{L},Tuple{DATASET_TYPE}}(f)) # TODO restore!!!
new{L}(FunctionWrapper{Any,Tuple{DATASET_TYPE}}(f))
end
end
# Functor: calling the wrapper calls the wrapped function; the result is
# asserted to be a `Vector{L}` (since the stored signature is loosened to `Any`).
(pf::PredictingFunction{L})(args...; kwargs...) where {L} = pf.f(args...; kwargs...)::Vector{L}
# const ModalInstance = Union{AbstractArray,Any}
# const LFun{L} = FunctionWrapper{L,Tuple{ModalInstance}}
# TODO maybe join DTLeaf and NSDTLeaf Union{L,LFun{L}}
# Decision leaf node, holding an output predicting function
# (used in neuro-symbolic hybrids: the leaf predicts by evaluating a function,
# and stores labels/predictions for both a train and a validation split).
struct NSDTLeaf{L<:Label} <: AbstractDecisionLeaf{L}
# predicting function
predicting_function :: PredictingFunction{L}
# supporting labels
supp_train_labels :: Vector{L}
supp_valid_labels :: Vector{L}
# supporting predictions
supp_train_predictions :: Vector{L}
supp_valid_predictions :: Vector{L}
# create leaf
# NSDTLeaf{L}(predicting_function, supp_labels::AbstractVector) where {L<:Label} = new{L}(predicting_function, supp_labels)
# NSDTLeaf(predicting_function::PredictingFunction{L}, supp_labels::AbstractVector) where {L<:Label} = NSDTLeaf{L}(predicting_function, supp_labels)
# create leaf without supporting labels
function NSDTLeaf{L}(
predicting_function :: PredictingFunction{L},
supp_train_labels :: Vector{L},
supp_valid_labels :: Vector{L},
supp_train_predictions :: Vector{L},
supp_valid_predictions :: Vector{L},
) where {L<:Label}
new{L}(
predicting_function,
supp_train_labels,
supp_valid_labels,
supp_train_predictions,
supp_valid_predictions,
)
end
function NSDTLeaf(
predicting_function :: PredictingFunction{L},
supp_train_labels :: Vector{L},
supp_valid_labels :: Vector{L},
supp_train_predictions :: Vector{L},
supp_valid_predictions :: Vector{L},
) where {L<:Label}
NSDTLeaf{L}(
predicting_function,
supp_train_labels,
supp_valid_labels,
supp_train_predictions,
supp_valid_predictions,
)
end
# Convenience: wrap any callable into a `PredictingFunction` first.
function NSDTLeaf{L}(f::Base.Callable, args...; kwargs...) where {L<:Label}
NSDTLeaf{L}(PredictingFunction{L}(f), args...; kwargs...)
end
# create leaf from supporting labels
# NSDTLeaf{L}(supp_labels::AbstractVector) where {L<:Label} = NSDTLeaf{L}(bestguess(supp_labels), supp_labels)
# function NSDTLeaf(supp_labels::AbstractVector)
# predicting_function = bestguess(supp_labels)
# NSDTLeaf(predicting_function, supp_labels)
# end
end
# Accessors; `train_or_valid` selects the train (true) or validation (false) split.
predicting_function(leaf::NSDTLeaf) = leaf.predicting_function
supp_labels(leaf::NSDTLeaf; train_or_valid = true) = (train_or_valid ? leaf.supp_train_labels : leaf.supp_valid_labels)
predictions(leaf::NSDTLeaf; train_or_valid = true) = (train_or_valid ? leaf.supp_train_predictions : leaf.supp_valid_predictions)
############################################################################################
using SoleData: ScalarExistentialFormula
# Internal decision node, holding a split-decision and a modality index.
# The inner constructors come in families:
# * with or without the representative leaf `this` (when absent, it is
#   derived from the children via `squashtoleaf`);
# * with or without explicit type parameters (when absent, `L` is derived as
#   the union of the children's label types, and `D` from the decision).
# A `ScalarExistentialFormula` decision is auto-wrapped into a
# `RestrictedDecision`; a `DoubleEdgedDecision` gets its back/forth references
# initialized to the freshly-built node itself.
struct DTInternal{L<:Label,D<:AbstractDecision} <: AbstractDecisionInternal{L,D}
# modality index + split-decision
i_modality :: ModalityId
decision :: D
# representative leaf for the current node
this :: AbstractDecisionLeaf{<:L}
# child nodes
left :: Union{AbstractDecisionLeaf{<:L}, DTInternal{<:L,<:AbstractDecision}}
right :: Union{AbstractDecisionLeaf{<:L}, DTInternal{<:L,<:AbstractDecision}}
# semantics-specific miscellaneous info
miscellaneous :: NamedTuple
# create node
function DTInternal{L,D}(
i_modality :: ModalityId,
decision :: D,
this :: AbstractDecisionLeaf,
left :: Union{AbstractDecisionLeaf,DTInternal},
right :: Union{AbstractDecisionLeaf,DTInternal},
miscellaneous :: NamedTuple = (;),
) where {D<:AbstractDecision,L<:Label}
new{L,D}(i_modality, decision, this, left, right, miscellaneous)
end
function DTInternal{L}(
i_modality :: ModalityId,
decision :: D,
this :: AbstractDecisionLeaf{<:L},
left :: Union{AbstractDecisionLeaf{<:L}, DTInternal{<:L,D}},
right :: Union{AbstractDecisionLeaf{<:L}, DTInternal{<:L,D}},
miscellaneous :: NamedTuple = (;),
) where {D<:AbstractDecision,L<:Label}
node = DTInternal{L,D}(i_modality, decision, this, left, right, miscellaneous)
# Self-initialize the decision's back/forth node references.
if decision isa DoubleEdgedDecision
_back!(decision, Ref(node))
_forth!(decision, Ref(node))
end
return node
end
function DTInternal(
i_modality :: ModalityId,
decision :: D,
this :: AbstractDecisionLeaf{L0},
left :: Union{AbstractDecisionLeaf{L1}, DTInternal{L1}},
right :: Union{AbstractDecisionLeaf{L2}, DTInternal{L2}},
miscellaneous :: NamedTuple = (;),
) where {D<:AbstractDecision,L0<:Label,L1<:Label,L2<:Label}
L = Union{L0,L1,L2}
node = DTInternal{L,D}(i_modality, decision, this, left, right, miscellaneous)
if decision isa DoubleEdgedDecision
_back!(decision, Ref(node))
_forth!(decision, Ref(node))
end
return node
end
# create node without local leaf
function DTInternal{L,D}(
i_modality :: ModalityId,
decision :: D,
left :: Union{AbstractDecisionLeaf,DTInternal},
right :: Union{AbstractDecisionLeaf,DTInternal},
miscellaneous :: NamedTuple = (;),
) where {D<:Union{AbstractDecision,ScalarExistentialFormula},L<:Label}
if decision isa ScalarExistentialFormula
decision = RestrictedDecision(decision)
end
# Derive the representative leaf from the two children.
this = squashtoleaf(Union{<:AbstractDecisionLeaf,<:DTInternal}[left, right])
node = DTInternal{L,D}(i_modality, decision, this, left, right, miscellaneous)
if decision isa DoubleEdgedDecision
_back!(decision, Ref(node))
_forth!(decision, Ref(node))
end
return node
end
function DTInternal{L}(
i_modality :: ModalityId,
decision :: D,
left :: Union{AbstractDecisionLeaf{<:L}, DTInternal{<:L}},
right :: Union{AbstractDecisionLeaf{<:L}, DTInternal{<:L}},
miscellaneous :: NamedTuple = (;),
) where {D<:AbstractDecision,L<:Label}
node = DTInternal{L,D}(i_modality, decision, left, right, miscellaneous)
if decision isa DoubleEdgedDecision
_back!(decision, Ref(node))
_forth!(decision, Ref(node))
end
return node
end
function DTInternal(
i_modality :: ModalityId,
decision :: _D,
left :: Union{AbstractDecisionLeaf{L1}, DTInternal{L1}},
right :: Union{AbstractDecisionLeaf{L2}, DTInternal{L2}},
miscellaneous :: NamedTuple = (;),
) where {_D<:Union{AbstractDecision,ScalarExistentialFormula},L1<:Label,L2<:Label}
if decision isa ScalarExistentialFormula
decision = RestrictedDecision(decision)
end
L = Union{L1,L2}
D = typeof(decision)
node = DTInternal{L,D}(i_modality, decision, left, right, miscellaneous)
if decision isa DoubleEdgedDecision
_back!(decision, Ref(node))
_forth!(decision, Ref(node))
end
return node
end
end
# Field accessors.
i_modality(node::DTInternal) = node.i_modality
decision(node::DTInternal) = node.decision
this(node::DTInternal) = node.this
left(node::DTInternal) = node.left
right(node::DTInternal) = node.right
miscellaneous(node::DTInternal) = node.miscellaneous
############################################################################################
############################################################################################
############################################################################################
# Mutators/accessors for the back/forth references stored inside a node's
# `DoubleEdgedDecision`. `back!`/`forth!` point ν1's decision at node ν2.
function back!(ν1::DTInternal{<:Label,<:DoubleEdgedDecision}, ν2::DTNode)
_back!(decision(ν1), Ref(ν2))
return ν1
end
function forth!(ν1::DTInternal{<:Label,<:DoubleEdgedDecision}, ν2::DTNode)
_forth!(decision(ν1), Ref(ν2))
return ν1
end
function back(ν1::DTInternal{<:Label,<:DoubleEdgedDecision})
return back(decision(ν1))
end
function forth(ν1::DTInternal{<:Label,<:DoubleEdgedDecision})
return forth(decision(ν1))
end
# True when a node's back (resp. forth) reference points to the node itself.
function isbackloop(ν1::DTInternal{<:Label,<:DoubleEdgedDecision})
return ν1 == back(ModalDecisionTrees.decision(ν1))
end
function isforthloop(ν1::DTInternal{<:Label,<:DoubleEdgedDecision})
return ν1 == forth(ModalDecisionTrees.decision(ν1))
end
# Supporting labels of an internal node are those of its representative leaf.
function supp_labels(node::DTInternal; train_or_valid = true)
@assert train_or_valid == true
supp_labels(this(node); train_or_valid = train_or_valid)
end
# Convert a (sub)tree with `RestrictedDecision`s into its "complete" form,
# based on `DoubleEdgedDecision`s. Leaves are returned unchanged.
function restricted2complete(ν::DTLeaf)
return ν
end
function restricted2complete(ν::DTNode{L,<:RestrictedDecision{<:ScalarExistentialFormula}}) where {L}
_i_modality = ModalDecisionTrees.i_modality(ν)
_decision = ModalDecisionTrees.decision(ν)
# Recursively convert both children first.
_ν1 = restricted2complete(ModalDecisionTrees.left(ν))
_ν2 = restricted2complete(ModalDecisionTrees.right(ν))
if ModalDecisionTrees.is_propositional_decision(_decision)
# Propositional decision: a single double-edged node on the atom suffices.
p = get_atom(formula(_decision))
ded = DoubleEdgedDecision(p)
_ν = DTInternal(_i_modality, ded, _ν1, _ν2)
_ν2 isa DTLeaf || ModalDecisionTrees.back!(_ν2, _ν)
return _ν
else
# Modal decision: split into an existential (relation) node whose left
# child checks the atom; wire back/forth references accordingly.
r = SoleData.relation(formula(_decision))
p = get_atom(formula(_decision))
ded = DoubleEdgedDecision(ExistentialTopFormula(r))
dedleft = DoubleEdgedDecision(p)
__ν1 = DTInternal(_i_modality, dedleft, _ν1, _ν2)
_ν = DTInternal(_i_modality, ded, __ν1, _ν2)
ModalDecisionTrees.forth!(_ν, __ν1) # _forth!(decision(ν), Ref(__ν1))
_ν1 isa DTLeaf || ModalDecisionTrees.back!(_ν1, _ν)
_ν2 isa DTLeaf || ModalDecisionTrees.back!(_ν2, _ν)
return _ν
end
end
############################################################################################
############################################################################################
############################################################################################
# Root abstract type for all symbolic models (trees, forests, hybrids).
abstract type SymbolicModel{L} end
# Decision Tree
# A tree is its root node plus, for each modality, the world type and the
# initial condition used for interpreting formulas.
struct DTree{L<:Label} <: SymbolicModel{L}
# root node
root :: DTNode{L}
# world types (one per modality)
worldtypes :: Vector{<:Type}
# initial world conditions (one per modality)
initconditions :: Vector{InitialCondition}
function DTree{L}(
root :: DTNode,
worldtypes :: AbstractVector{<:Type},
initconditions :: AbstractVector{<:InitialCondition},
) where {L<:Label}
@assert length(worldtypes) > 0 "Cannot instantiate DTree with no worldtype."
@assert length(initconditions) > 0 "Cannot instantiate DTree with no initcondition."
new{L}(root, collect(worldtypes), Vector{InitialCondition}(collect(initconditions)))
end
function DTree(
root :: DTNode{L,D},
worldtypes :: AbstractVector{<:Type},
initconditions :: AbstractVector{<:InitialCondition},
) where {L<:Label,D<:AbstractDecision}
DTree{L}(root, worldtypes, initconditions)
end
end
# Field accessors.
root(tree::DTree) = tree.root
worldtypes(tree::DTree) = tree.worldtypes
initconditions(tree::DTree) = tree.initconditions
############################################################################################
# Decision Forest (i.e., ensemble of trees via bagging)
struct DForest{L<:Label} <: SymbolicModel{L}
# trees
trees :: Vector{<:DTree{L}}
# metrics (e.g., out-of-bag metrics), possibly empty
metrics :: NamedTuple
# create forest from vector of trees
function DForest{L}(
trees :: AbstractVector{<:DTree},
) where {L<:Label}
new{L}(collect(trees), (;))
end
function DForest(
trees :: AbstractVector{<:DTree{L}},
) where {L<:Label}
DForest{L}(trees)
end
# create forest from vector of trees, with attached metrics
function DForest{L}(
trees :: AbstractVector{<:DTree},
metrics :: NamedTuple,
) where {L<:Label}
new{L}(collect(trees), metrics)
end
function DForest(
trees :: AbstractVector{<:DTree{L}},
metrics :: NamedTuple,
) where {L<:Label}
DForest{L}(trees, metrics)
end
end
# Field accessors.
trees(forest::DForest) = forest.trees
metrics(forest::DForest) = forest.metrics
############################################################################################
# Ensemble of decision trees weighted by softmax autoencoder:
# a feature-extraction function (e.g., a neural component) feeds a set of trees.
struct RootLevelNeuroSymbolicHybrid{F<:Any,L<:Label} <: SymbolicModel{L}
feature_function :: F
# trees
trees :: Vector{<:DTree{L}}
# metrics
metrics :: NamedTuple
function RootLevelNeuroSymbolicHybrid{F,L}(
feature_function :: F,
trees :: AbstractVector{<:DTree},
metrics :: NamedTuple = (;),
) where {F<:Any,L<:Label}
new{F,L}(feature_function, collect(trees), metrics)
end
function RootLevelNeuroSymbolicHybrid(
feature_function :: F,
trees :: AbstractVector{<:DTree{L}},
metrics :: NamedTuple = (;),
) where {F<:Any,L<:Label}
RootLevelNeuroSymbolicHybrid{F,L}(feature_function, trees, metrics)
end
end
# Field accessors.
trees(nsdt::RootLevelNeuroSymbolicHybrid) = nsdt.trees
metrics(nsdt::RootLevelNeuroSymbolicHybrid) = nsdt.metrics
############################################################################################
# Methods
############################################################################################
# Number of leaves (recursively over both children)
nleaves(leaf::AbstractDecisionLeaf) = 1
nleaves(node::DTInternal) = nleaves(left(node)) + nleaves(right(node))
nleaves(tree::DTree) = nleaves(root(tree))
nleaves(nsdt::RootLevelNeuroSymbolicHybrid) = sum(nleaves.(trees(nsdt)))
# Number of nodes (leaves + internal)
nnodes(leaf::AbstractDecisionLeaf) = 1
nnodes(node::DTInternal) = 1 + nnodes(left(node)) + nnodes(right(node))
nnodes(tree::DTree) = nnodes(root(tree))
nnodes(f::DForest) = sum(nnodes.(trees(f)))
nnodes(nsdt::RootLevelNeuroSymbolicHybrid) = sum(nnodes.(trees(nsdt)))
# Number of trees
ntrees(f::DForest) = length(trees(f))
Base.length(f::DForest) = ntrees(f)
ntrees(nsdt::RootLevelNeuroSymbolicHybrid) = length(trees(nsdt))
Base.length(nsdt::RootLevelNeuroSymbolicHybrid) = ntrees(nsdt)
# Height (a leaf has height 0; ensembles report the maximum over their trees)
height(leaf::AbstractDecisionLeaf) = 0
height(node::DTInternal) = 1 + max(height(left(node)), height(right(node)))
height(tree::DTree) = height(root(tree))
height(f::DForest) = maximum(height.(trees(f)))
height(nsdt::RootLevelNeuroSymbolicHybrid) = maximum(height.(trees(nsdt)))
# Modal height (only modal nodes count towards depth)
modalheight(leaf::AbstractDecisionLeaf) = 0
modalheight(node::DTInternal) = Int(ismodalnode(node)) + max(modalheight(left(node)), modalheight(right(node)))
modalheight(tree::DTree) = modalheight(root(tree))
modalheight(f::DForest) = maximum(modalheight.(trees(f)))
modalheight(nsdt::RootLevelNeuroSymbolicHybrid) = maximum(modalheight.(trees(nsdt)))
# Number of supporting instances (derived from the supporting labels)
ninstances(leaf::AbstractDecisionLeaf; train_or_valid = true) = length(supp_labels(leaf; train_or_valid = train_or_valid))
ninstances(node::DTInternal; train_or_valid = true) = ninstances(left(node); train_or_valid = train_or_valid) + ninstances(right(node); train_or_valid = train_or_valid)
ninstances(tree::DTree; train_or_valid = true) = ninstances(root(tree); train_or_valid = train_or_valid)
ninstances(f::DForest; train_or_valid = true) = maximum(map(t->ninstances(t; train_or_valid = train_or_valid), trees(f))) # TODO actually wrong
ninstances(nsdt::RootLevelNeuroSymbolicHybrid; train_or_valid = true) = maximum(map(t->ninstances(t; train_or_valid = train_or_valid), trees(nsdt))) # TODO actually wrong
############################################################################################
############################################################################################
# Leaf/modal-node predicates. A tree delegates to its root node.
isleafnode(leaf::AbstractDecisionLeaf) = true
isleafnode(node::DTInternal) = false
isleafnode(tree::DTree) = isleafnode(root(tree))
# A node is modal when it is internal and its decision is not propositional.
ismodalnode(node::DTInternal) = (!isleafnode(node) && !is_propositional_decision(decision(node)))
ismodalnode(tree::DTree) = ismodalnode(root(tree))
############################################################################################
############################################################################################
# Human-readable rendering of a node's decision (delegates to the
# decision-level `displaydecision` defined elsewhere).
displaydecision(node::DTInternal, args...; kwargs...) =
displaydecision(i_modality(node), decision(node), args...; node = node, kwargs...)
# displaydecision_inverse(node::DTInternal, args...; kwargs...) =
# displaydecision_inverse(i_modality(node), decision(node), args...; kwargs...)
############################################################################################
############################################################################################
# Printing delegates to the module-local `display` methods below.
Base.show(io::IO, a::Union{DTNode,DTree,DForest}) = println(io, display(a))
# Multi-line textual summaries for leaves, nodes, trees, and ensembles.
# Each method builds a template string; `displaymodel` (from SoleModels) is
# used for the full model rendering.
function display(leaf::DTLeaf{L}) where {L<:CLabel}
return """
Classification Decision Leaf{$(L)}(
label: $(prediction(leaf))
supporting labels: $(supp_labels(leaf))
supporting labels countmap: $(StatsBase.countmap(supp_labels(leaf)))
metrics: $(get_metrics(leaf))
)
"""
end
function display(leaf::DTLeaf{L}) where {L<:RLabel}
return """
Regression Decision Leaf{$(L)}(
label: $(prediction(leaf))
supporting labels: $(supp_labels(leaf))
metrics: $(get_metrics(leaf))
)
"""
end
function display(leaf::NSDTLeaf{L}) where {L<:CLabel}
return """
Classification Functional Decision Leaf{$(L)}(
predicting_function: $(leaf.predicting_function)
supporting labels (train): $(leaf.supp_train_labels)
supporting labels (valid): $(leaf.supp_valid_labels)
supporting predictions (train): $(leaf.supp_train_predictions)
supporting predictions (valid): $(leaf.supp_valid_predictions)
supporting labels countmap (train): $(StatsBase.countmap(leaf.supp_train_labels))
supporting labels countmap (valid): $(StatsBase.countmap(leaf.supp_valid_labels))
supporting predictions countmap (train): $(StatsBase.countmap(leaf.supp_train_predictions))
supporting predictions countmap (valid): $(StatsBase.countmap(leaf.supp_valid_predictions))
metrics (train): $(get_metrics(leaf; train_or_valid = true))
metrics (valid): $(get_metrics(leaf; train_or_valid = false))
)
"""
end
function display(leaf::NSDTLeaf{L}) where {L<:RLabel}
return """
Regression Functional Decision Leaf{$(L)}(
predicting_function: $(leaf.predicting_function)
supporting labels (train): $(leaf.supp_train_labels)
supporting labels (valid): $(leaf.supp_valid_labels)
supporting predictions (train): $(leaf.supp_train_predictions)
supporting predictions (valid): $(leaf.supp_valid_predictions)
metrics (train): $(get_metrics(leaf; train_or_valid = true))
metrics (valid): $(get_metrics(leaf; train_or_valid = false))
)
"""
end
# Internal node: representative leaf, decision, and subtree statistics.
function display(node::DTInternal{L,D}) where {L,D}
return """
Decision Node{$(L),$(D)}(
$(display(this(node)))
###########################################################
i_modality: $(i_modality(node))
decision: $(displaydecision(node))
miscellaneous: $(miscellaneous(node))
###########################################################
sub-tree leaves: $(nleaves(node))
sub-tree nodes: $(nnodes(node))
sub-tree height: $(height(node))
sub-tree modal height: $(modalheight(node))
)
"""
end
function display(tree::DTree{L}) where {L}
return """
Decision Tree{$(L)}(
worldtypes: $(worldtypes(tree))
initconditions: $(initconditions(tree))
###########################################################
sub-tree leaves: $(nleaves(tree))
sub-tree nodes: $(nnodes(tree))
sub-tree height: $(height(tree))
sub-tree modal height: $(modalheight(tree))
###########################################################
tree:
$(displaymodel(tree))
)
"""
end
function display(forest::DForest{L}) where {L}
return """
Decision Forest{$(L)}(
# trees: $(ntrees(forest))
metrics: $(metrics(forest))
forest:
$(displaymodel(forest))
)
"""
end
function display(nsdt::RootLevelNeuroSymbolicHybrid{F,L}) where {F,L}
return """
Root-Level Neuro-Symbolic Decision Tree Hybrid{$(F),$(L)}(
# trees: $(ntrees(nsdt))
metrics: $(metrics(nsdt))
nsdt:
$(displaymodel(nsdt))
)
"""
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 13061 |
include("ModalCART.jl")
################################################################################
############################# Unimodal datasets ################################
################################################################################
# Unimodal convenience overload: wrap the single logiset into a `MultiLogiset`
# and delegate to the multimodal method.
build_stump(X::AbstractModalLogiset, args...; kwargs...) =
    build_stump(MultiLogiset(X), args...; kwargs...)
# Unimodal convenience overload: wrap the single logiset into a `MultiLogiset`
# and delegate to the multimodal method.
build_tree(X::AbstractModalLogiset, args...; kwargs...) =
    build_tree(MultiLogiset(X), args...; kwargs...)
# Unimodal convenience overload: wrap the single logiset into a `MultiLogiset`
# and delegate to the multimodal method.
build_forest(X::AbstractModalLogiset, args...; kwargs...) =
    build_forest(MultiLogiset(X), args...; kwargs...)
################################################################################
############################ Multimodal datasets ###############################
################################################################################
# Docstring shared by the three model-building entry points below
# (attached to each via `"""$(doc_build)"""`).
doc_build = """
build_stump(X, Y, W = nothing; kwargs...)
build_tree(X, Y, W = nothing; kwargs...)
build_forest(X, Y, W = nothing; kwargs...)
Train a decision stump (i.e., decision tree with depth 1), a decision tree, or
a random forest model on logiset `X` with labels `Y` and weights `W`.
"""
"""$(doc_build)"""
function build_stump(
X :: MultiLogiset,
Y :: AbstractVector{L},
W :: Union{Nothing,AbstractVector{U},Symbol} = nothing;
kwargs...,
) where {L<:Label,U}
params = NamedTuple(kwargs)
@assert !haskey(params, :max_depth) || params.max_depth == 1 "build_stump " *
"does not allow max_depth != 1."
build_tree(X, Y, W; max_depth = 1, kwargs...)
end
"""$(doc_build)"""
function build_tree(
X :: MultiLogiset,
Y :: AbstractVector{L},
W :: Union{Nothing,AbstractVector{U},Symbol} = default_weights(ninstances(X));
##############################################################################
loss_function :: Union{Nothing,Loss} = nothing,
lookahead :: Union{Nothing,Integer} = nothing,
max_depth :: Union{Nothing,Int64} = nothing,
min_samples_leaf :: Int64 = BOTTOM_MIN_SAMPLES_LEAF,
min_purity_increase :: AbstractFloat = BOTTOM_MIN_PURITY_INCREASE,
max_purity_at_leaf :: AbstractFloat = BOTTOM_MAX_PURITY_AT_LEAF,
##############################################################################
max_modal_depth :: Union{Nothing,Int64} = nothing,
n_subrelations :: Union{Function,AbstractVector{<:Function}} = identity,
n_subfeatures :: Union{Function,AbstractVector{<:Function}} = identity,
initconditions :: Union{InitialCondition,AbstractVector{<:InitialCondition}} = start_without_world,
allow_global_splits :: Union{Bool,AbstractVector{Bool}} = true,
##############################################################################
use_minification :: Bool = false,
perform_consistency_check :: Bool = DEFAULT_PERFORM_CONSISTENCY_CHECK,
##############################################################################
rng :: Random.AbstractRNG = Random.GLOBAL_RNG,
print_progress :: Bool = true,
) where {L<:Label,U}
@assert W isa AbstractVector || W in [nothing, :rebalance, :default]
W = if isnothing(W) || W == :default
default_weights(Y)
elseif W == :rebalance
balanced_weights(Y)
else
W
end
@assert all(W .>= 0) "Sample weights must be non-negative."
@assert ninstances(X) == length(Y) == length(W) "Mismatching number of samples in X, Y & W: $(ninstances(X)), $(length(Y)), $(length(W))"
if isnothing(loss_function)
loss_function = default_loss_function(L)
end
if isnothing(lookahead)
lookahead = 0
end
if allow_global_splits isa Bool
allow_global_splits = fill(allow_global_splits, nmodalities(X))
end
if n_subrelations isa Function
n_subrelations = fill(n_subrelations, nmodalities(X))
end
if n_subfeatures isa Function
n_subfeatures = fill(n_subfeatures, nmodalities(X))
end
if initconditions isa InitialCondition
initconditions = fill(initconditions, nmodalities(X))
end
@assert isnothing(max_depth) || (max_depth >= 0)
@assert isnothing(max_modal_depth) || (max_modal_depth >= 0)
fit_tree(X, Y, initconditions, W
;###########################################################################
loss_function = loss_function,
lookahead = lookahead,
max_depth = max_depth,
min_samples_leaf = min_samples_leaf,
min_purity_increase = min_purity_increase,
max_purity_at_leaf = max_purity_at_leaf,
############################################################################
max_modal_depth = max_modal_depth,
n_subrelations = n_subrelations,
n_subfeatures = [ n_subfeatures[i](nfeatures(modality)) for (i,modality) in enumerate(eachmodality(X)) ],
allow_global_splits = allow_global_splits,
############################################################################
use_minification = use_minification,
perform_consistency_check = perform_consistency_check,
############################################################################
rng = rng,
print_progress = print_progress,
)
end
"""$(doc_build)"""
function build_forest(
X :: MultiLogiset,
Y :: AbstractVector{L},
# Use unary weights if no weight is supplied
W :: Union{Nothing,AbstractVector{U},Symbol} = default_weights(Y);
##############################################################################
# Forest logic-agnostic parameters
ntrees = 100,
partial_sampling = 0.7, # portion of sub-sampled samples (without replacement) by each tree
##############################################################################
# Tree logic-agnostic parameters
loss_function :: Union{Nothing,Loss} = nothing,
lookahead :: Union{Nothing,Integer} = nothing,
max_depth :: Union{Nothing,Int64} = nothing,
min_samples_leaf :: Int64 = BOTTOM_MIN_SAMPLES_LEAF,
min_purity_increase :: AbstractFloat = BOTTOM_MIN_PURITY_INCREASE,
max_purity_at_leaf :: AbstractFloat = BOTTOM_MAX_PURITY_AT_LEAF,
##############################################################################
# Modal parameters
max_modal_depth :: Union{Nothing,Int64} = nothing,
n_subrelations :: Union{Function,AbstractVector{<:Function}} = identity,
n_subfeatures :: Union{Function,AbstractVector{<:Function}} = x -> ceil(Int64, sqrt(x)),
initconditions :: Union{InitialCondition,AbstractVector{<:InitialCondition}} = start_without_world,
allow_global_splits :: Union{Bool,AbstractVector{Bool}} = true,
##############################################################################
use_minification :: Bool = false,
perform_consistency_check :: Bool = DEFAULT_PERFORM_CONSISTENCY_CHECK,
##############################################################################
rng :: Random.AbstractRNG = Random.GLOBAL_RNG,
print_progress :: Bool = true,
suppress_parity_warning :: Bool = false,
) where {L<:Label,U}
@assert W isa AbstractVector || W in [nothing, :rebalance, :default]
W = if isnothing(W) || W == :default
default_weights(Y)
elseif W == :rebalance
balanced_weights(Y)
else
W
end
@assert all(W .>= 0) "Sample weights must be non-negative."
@assert ninstances(X) == length(Y) == length(W) "Mismatching number of samples in X, Y & W: $(ninstances(X)), $(length(Y)), $(length(W))"
if n_subrelations isa Function
n_subrelations = fill(n_subrelations, nmodalities(X))
end
if n_subfeatures isa Function
n_subfeatures = fill(n_subfeatures, nmodalities(X))
end
if initconditions isa InitialCondition
initconditions = fill(initconditions, nmodalities(X))
end
if allow_global_splits isa Bool
allow_global_splits = fill(allow_global_splits, nmodalities(X))
end
if ntrees < 1
error("the number of trees must be >= 1")
end
if !(0.0 < partial_sampling <= 1.0)
error("partial_sampling must be in the range (0,1]")
end
if any(map(f->!(f isa SupportedLogiset), eachmodality(X)))
@warn "Warning! Consider using structures optimized for model checking " *
"such as SupportedLogiset."
end
tot_samples = ninstances(X)
num_samples = floor(Int64, partial_sampling * tot_samples)
trees = Vector{DTree{L}}(undef, ntrees)
oob_instances = Vector{Vector{Integer}}(undef, ntrees)
oob_metrics = Vector{NamedTuple}(undef, ntrees)
rngs = [spawn(rng) for i_tree in 1:ntrees]
if print_progress
p = Progress(ntrees; dt = 1, desc = "Computing Forest...")
end
Threads.@threads for i_tree in 1:ntrees
train_idxs = rand(rngs[i_tree], 1:tot_samples, num_samples)
X_slice = SoleData.instances(X, train_idxs, Val(true))
Y_slice = @view Y[train_idxs]
W_slice = SoleBase.slice_weights(W, train_idxs)
trees[i_tree] = build_tree(
X_slice
, Y_slice
, W_slice
;
################################################################################
loss_function = loss_function,
lookahead = lookahead,
max_depth = max_depth,
min_samples_leaf = min_samples_leaf,
min_purity_increase = min_purity_increase,
max_purity_at_leaf = max_purity_at_leaf,
################################################################################
max_modal_depth = max_modal_depth,
n_subrelations = n_subrelations,
n_subfeatures = n_subfeatures,
initconditions = initconditions,
allow_global_splits = allow_global_splits,
################################################################################
use_minification = use_minification,
perform_consistency_check = perform_consistency_check,
################################################################################
rng = rngs[i_tree],
print_progress = false,
)
# grab out-of-bag indices
oob_instances[i_tree] = setdiff(1:tot_samples, train_idxs)
tree_preds = apply(trees[i_tree], SoleData.instances(X, oob_instances[i_tree], Val(true)))
oob_metrics[i_tree] = (;
actual = Y[oob_instances[i_tree]],
predicted = tree_preds,
weights = collect(SoleBase.slice_weights(W, oob_instances[i_tree]))
)
!print_progress || next!(p)
end
metrics = (;
oob_metrics = oob_metrics,
)
if L<:CLabel
# For each sample, construct its random forest predictor
# by averaging (or majority voting) only those
# trees corresponding to boot-strap samples in which the sample did not appear
oob_classified = Vector{Bool}()
Threads.@threads for i in 1:tot_samples
selected_trees = fill(false, ntrees)
# pick every tree trained without i-th sample
for i_tree in 1:ntrees
if i in oob_instances[i_tree] # if i is present in the i_tree-th tree, selecte thi tree
selected_trees[i_tree] = true
end
end
index_of_trees_to_test_with = findall(selected_trees)
if length(index_of_trees_to_test_with) == 0
continue
end
X_slice = SoleData.instances(X, [i], Val(true))
Y_slice = [Y[i]]
preds = apply(trees[index_of_trees_to_test_with], X_slice; suppress_parity_warning = suppress_parity_warning)
push!(oob_classified, Y_slice[1] == preds[1])
end
oob_error = 1.0 - (sum(W[findall(oob_classified)]) / sum(W))
metrics = merge(metrics, (
oob_error = oob_error,
))
end
DForest{L}(trees, metrics)
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6443 |
############################################################################################
# Decisions
############################################################################################
using SoleLogics
using SoleLogics: identityrel, globalrel
using SoleData.DimensionalDatasets: alpha
using SoleData: ScalarOneStepFormula,
ScalarExistentialFormula,
ExistentialTopFormula,
ScalarUniversalFormula
# Sentinel singleton used by `displaydecision` when no tree-node context is
# supplied (it compares unequal to any actual node).
struct NoNode end
# Render `decision` (pertaining to the `i_modality`-th modality of a
# multimodal dataset) as a string of the form `"{i_modality} <decision>"`.
# When a per-modality `variable_names_map` is given, only the map of the
# relevant modality is forwarded.
function displaydecision(
    i_modality::ModalityId,
    decision::AbstractDecision;
    variable_names_map::Union{Nothing,AbstractVector{<:AbstractVector},AbstractVector{<:AbstractDict}} = nothing,
    kwargs...,
)
    modality_map = isnothing(variable_names_map) ? nothing : variable_names_map[i_modality]
    inner = displaydecision(decision; variable_names_map = modality_map, kwargs...)
    return "{$i_modality} $inner"
end
# function displaydecision_inverse(decision::AbstractDecision, args...; node = nothing, kwargs...)
# syntaxstring(dual(decision), args...; node = node, kwargs...)
# end
# function displaydecision_inverse(i_modality::ModalityId, decision::AbstractDecision, args...; node = nothing, kwargs...)
# displaydecision(i_modality, dual(decision), args...; node = node, kwargs...)
# end
# Propositional/global character of a decision formula:
# - a bare `Atom` is inherently propositional (no relation involved);
# - one-step and existential-top formulas are propositional iff their relation
#   is the identity relation, and global iff it is the global relation.
is_propositional_decision(d::Atom) = true
is_global_decision(d::Atom) = false
is_propositional_decision(d::ScalarOneStepFormula) = (SoleData.relation(d) == identityrel)
is_global_decision(d::ScalarOneStepFormula) = (SoleData.relation(d) == globalrel)
is_propositional_decision(d::ExistentialTopFormula) = (SoleData.relation(d) == identityrel)
is_global_decision(d::ExistentialTopFormula) = (SoleData.relation(d) == globalrel)
import SoleData: relation, atom, metacond, feature, test_operator, threshold
"""
A decision wrapping a scalar existential formula (the decision language of
trees trained in "restricted" mode). All formula accessors are forwarded.
"""
struct RestrictedDecision{F<:ScalarExistentialFormula} <: AbstractDecision
    formula :: F
end

formula(d::RestrictedDecision) = d.formula

# Forward the accessors of the wrapped existential formula.
relation(d::RestrictedDecision) = relation(formula(d))
atom(d::RestrictedDecision) = atom(formula(d))
metacond(d::RestrictedDecision) = metacond(formula(d))
feature(d::RestrictedDecision) = feature(formula(d))
test_operator(d::RestrictedDecision) = test_operator(formula(d))
threshold(d::RestrictedDecision) = threshold(formula(d))

is_propositional_decision(d::RestrictedDecision) = is_propositional_decision(formula(d))
is_global_decision(d::RestrictedDecision) = is_global_decision(formula(d))
# Render a `RestrictedDecision` as `RestrictedDecision(<formula>)`.
# (`node` and `displayedges` are accepted for interface uniformity with the
# `DoubleEdgedDecision` method, but are unused here.)
function displaydecision(d::RestrictedDecision; node = NoNode(), displayedges = true, kwargs...)
    return "RestrictedDecision(" * syntaxstring(formula(d); kwargs...) * ")"
end
# Rebuild a `RestrictedDecision` with its threshold mapped back through
# `threshold_backmap` (e.g., to undo a feature-value transformation), keeping
# relation and metacondition unchanged.
function RestrictedDecision(
    d::RestrictedDecision{<:ScalarExistentialFormula},
    threshold_backmap::Function
)
    φ = formula(d)
    oldcond = SoleLogics.value(atom(φ))
    newcond = ScalarCondition(metacond(oldcond), threshold_backmap(threshold(oldcond)))
    return RestrictedDecision(ScalarExistentialFormula(relation(φ), newcond))
end
"""
A decision holding a formula together with two mutable, lazily-assigned
references (`_back`, `_forth`) to tree nodes; the references are left
undefined at construction and set later via `_back!`/`_forth!`.
Only `Atom` and `ExistentialTopFormula` formulas are accepted.
"""
mutable struct DoubleEdgedDecision{F<:Formula} <: AbstractDecision
    formula :: F
    # Node references; undefined until assigned (checked via `isdefined`).
    _back :: Base.RefValue{N} where N<:AbstractNode # {L,DoubleEdgedDecision}
    _forth :: Base.RefValue{N} where N<:AbstractNode # {L,DoubleEdgedDecision}

    function DoubleEdgedDecision{F}(formula::F) where {F<:Formula}
        @assert F <: Union{Atom,ExistentialTopFormula} "Cannot instantiate " *
            "DoubleEdgedDecision with formula of type $(F)."
        # `new{F}()` leaves `_back`/`_forth` undefined on purpose.
        ded = new{F}()
        ded.formula = formula
        ded
    end
    function DoubleEdgedDecision(formula::F) where {F<:Formula}
        DoubleEdgedDecision{F}(formula)
    end
end
formula(ded::DoubleEdgedDecision) = ded.formula

# `back`/`forth` return the referenced node (or `nothing` if the edge is
# unset); `_back`/`_forth` return the raw `Ref` itself (or `nothing`).
back(ded::DoubleEdgedDecision) = isdefined(ded, :_back) ? ded._back[] : nothing
forth(ded::DoubleEdgedDecision) = isdefined(ded, :_forth) ? ded._forth[] : nothing
_back(ded::DoubleEdgedDecision) = isdefined(ded, :_back) ? ded._back : nothing
_forth(ded::DoubleEdgedDecision) = isdefined(ded, :_forth) ? ded._forth : nothing

# Setters (the `_back!`/`_forth!` arguments are `Ref`s to nodes).
formula!(ded::DoubleEdgedDecision, formula) = (ded.formula = formula)
_back!(ded::DoubleEdgedDecision, _back) = (ded._back = _back)
_forth!(ded::DoubleEdgedDecision, _forth) = (ded._forth = _forth)

# TODO remove?
is_propositional_decision(ded::DoubleEdgedDecision) = is_propositional_decision(formula(ded))
is_global_decision(ded::DoubleEdgedDecision) = is_global_decision(formula(ded))
# Render a `DoubleEdgedDecision` as a string. Besides the formula, when
# `displayedges` is true the `_back`/`_forth` node references are described
# too; a reference pointing to `node` itself is marked as a "loop", and a "?"
# prefix signals that no reference node was supplied for the comparison.
function displaydecision(ded::DoubleEdgedDecision; node = NoNode(), displayedges = true, kwargs...)
    outstr = ""
    outstr *= "DoubleEdgedDecision("
    outstr *= syntaxstring(formula(ded); kwargs...)
    if displayedges
        # outstr *= ", " * (isnothing(_back(ded)) ? "-" : "$(typeof(_back(ded))){decision = $(displaydecision(_back(ded)[])), height = $(height(_back(ded)[]))}")
        outstr *= ", " * (isnothing(_back(ded)) ? "-" : begin
            νb = _back(ded)[]
            # @show νb
            # @show node
            if νb == node
                "back{loop}"
            else
                if node isa NoNode "?" else "" end *
                    "back{decision = $(displaydecision(decision(νb); node = νb, displayedges = false)), height = $(height(νb))}"
            end
        end)
        # outstr *= ", " * (isnothing(_forth(ded)) ? "-" : "$(typeof(_forth(ded))){decision = $(displaydecision(_forth(ded)[])), height = $(height(_forth(ded)[]))}")
        outstr *= ", " * (isnothing(_forth(ded)) ? "-" : begin
            νf = _forth(ded)[]
            if νf == node
                "forth{loop}"
            else
                if node isa NoNode "?" else "" end *
                    "forth{decision = $(displaydecision(decision(νf); node = νf, displayedges = false)), height = $(height(νf))}"
            end
        end)
    end
    outstr *= ")"
    # (Older multi-line rendering, kept for reference:)
    # outstr *= "DoubleEdgedDecision(\n\t"
    # outstr *= syntaxstring(formula(ded))
    # # outstr *= "\n\tback: " * (isnothing(back(ded)) ? "-" : displaymodel(back(ded), args...; kwargs...))
    # # outstr *= "\n\tforth: " * (isnothing(forth(ded)) ? "-" : displaymodel(forth(ded), args...; kwargs...))
    # outstr *= "\n\tback: " * (isnothing(_back(ded)) ? "-" : "$(typeof(_back(ded)))")
    # outstr *= "\n\tforth: " * (isnothing(_forth(ded)) ? "-" : "$(typeof(_forth(ded)))")
    # outstr *= "\n)"
    outstr
end
# Threshold back-mapping for `DoubleEdgedDecision`s (cf. the analogous
# `RestrictedDecision` method): not implemented yet, always throws.
function DoubleEdgedDecision(
    d::DoubleEdgedDecision,
    threshold_backmap::Function
)
    return error("TODO implement")
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 980 |
# Default ("bottom") hyperparameter values: each is chosen at its loosest
# setting, so that by default no pruning/early-stopping constraint is active.
# NOTE(review): these are non-`const` module-level globals; marking them
# `const` would be more idiomatic — confirm nothing reassigns them first.
DEFAULT_PERFORM_CONSISTENCY_CHECK = false

BOTTOM_MAX_DEPTH = typemax(Int64)
BOTTOM_MIN_SAMPLES_LEAF = 1
BOTTOM_MIN_PURITY_INCREASE = -Inf
BOTTOM_MAX_PURITY_AT_LEAF = Inf
BOTTOM_NTREES = typemax(Int64)
BOTTOM_MAX_PERFORMANCE_AT_SPLIT = Inf
BOTTOM_MIN_PERFORMANCE_AT_SPLIT = -Inf
BOTTOM_MAX_MODAL_DEPTH = typemax(Int64)
# function parametrization_is_going_to_prune(pruning_params)
# (haskey(pruning_params, :max_depth) && pruning_params.max_depth < BOTTOM_MAX_DEPTH) ||
# # (haskey(pruning_params, :min_samples_leaf) && pruning_params.min_samples_leaf > BOTTOM_MIN_SAMPLES_LEAF) ||
# (haskey(pruning_params, :min_purity_increase) && pruning_params.min_purity_increase > BOTTOM_MIN_PURITY_INCREASE) ||
# (haskey(pruning_params, :max_purity_at_leaf) && pruning_params.max_purity_at_leaf < BOTTOM_MAX_PURITY_AT_LEAF) ||
# (haskey(pruning_params, :ntrees) && pruning_params.ntrees < BOTTOM_NTREES)
# end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 25395 |
using ResumableFunctions
using SoleLogics: AbstractFrame
using SoleData: AbstractWorld, AbstractWorlds, AbstractFeature
using Logging: @logmsg
using SoleData: AbstractModalLogiset, SupportedLogiset
using SoleData: base, globmemoset
using SoleData: featchannel,
featchannel_onestep_aggregation,
onestep_aggregation
using SoleData: SupportedLogiset, ScalarOneStepMemoset, AbstractFullMemoset
using SoleData.DimensionalDatasets: UniformFullDimensionalLogiset
import SoleData: relations, nrelations, metaconditions, nmetaconditions
import SoleData: supports
import SoleData.DimensionalDatasets: nfeatures, features
using SoleData: Aggregator, TestOperator, ScalarMetaCondition
using SoleData: ScalarExistentialFormula
using DataStructures
"""
Logical datasets with scalar features.
"""
const AbstractScalarLogiset{
W<:AbstractWorld,
U<:Number,
FT<:AbstractFeature,
FR<:AbstractFrame{W}
} = AbstractModalLogiset{W,U,FT,FR}
# Delegate relation/metacondition accessors of a `SupportedLogiset` to its
# first support, for the two support layouts that carry them: a lone
# `ScalarOneStepMemoset`, or one paired with an `AbstractFullMemoset`.
nrelations(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}}) where {W,U,FT,FR,L,N} = nrelations(supports(X)[1])
nrelations(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}}) where {W,U,FT,FR,L,N} = nrelations(supports(X)[1])
relations(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}}) where {W,U,FT,FR,L,N} = relations(supports(X)[1])
relations(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}}) where {W,U,FT,FR,L,N} = relations(supports(X)[1])
nmetaconditions(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}}) where {W,U,FT,FR,L,N} = nmetaconditions(supports(X)[1])
nmetaconditions(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}}) where {W,U,FT,FR,L,N} = nmetaconditions(supports(X)[1])
metaconditions(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}}) where {W,U,FT,FR,L,N} = metaconditions(supports(X)[1])
metaconditions(X::SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}}) where {W,U,FT,FR,L,N} = metaconditions(supports(X)[1])
"""
Perform the modal step, that is, evaluate an existential formula
on a set of worlds, eventually computing the new world set.
"""
function modalstep(
X, # ::AbstractScalarLogiset{W},
i_instance::Integer,
worlds::AbstractWorlds{W},
decision::RestrictedDecision{<:ScalarExistentialFormula},
return_worldmap::Union{Val{true},Val{false}} = Val(false)
) where {W<:AbstractWorld}
@logmsg LogDetail "modalstep" worlds displaydecision(decision)
# W = worldtype(frame(X, i_instance))
φ = formula(decision)
satisfied = false
# TODO the's room for optimization here: with some relations (e.g. IA_A, IA_L) can be made smaller
if return_worldmap isa Val{true}
worlds_map = ThreadSafeDict{W,AbstractWorlds{W}}()
end
if length(worlds) == 0
# If there are no neighboring worlds, then the modal decision is not met
@logmsg LogDetail " Empty worldset"
else
# Otherwise, check whether at least one of the accessible worlds witnesses truth of the decision.
# TODO rewrite with new_worlds = map(...acc_worlds)
# Initialize new worldset
new_worlds = Worlds{W}()
# List all accessible worlds
acc_worlds = begin
if return_worldmap isa Val{true}
Threads.@threads for curr_w in worlds
acc = accessibles(frame(X, i_instance), curr_w, relation(φ)) |> collect
worlds_map[curr_w] = acc
end
unique(cat([ worlds_map[k] for k in keys(worlds_map) ]...; dims = 1))
else
accessibles(frame(X, i_instance), worlds, relation(φ))
end
end
for w in acc_worlds
if checkcondition(SoleLogics.value(atom(φ)), X, i_instance, w)
# @logmsg LogDetail " Found world " w ch_readWorld ... ch_readWorld(w, channel)
satisfied = true
push!(new_worlds, w)
end
end
if satisfied == true
worlds = new_worlds
else
# If none of the neighboring worlds satisfies the decision, then
# the new set is left unchanged
end
end
if satisfied
@logmsg LogDetail " YES" worlds
else
@logmsg LogDetail " NO"
end
if return_worldmap isa Val{true}
return (satisfied, worlds, worlds_map)
else
return (satisfied, worlds)
end
end
############################################################################################
############################################################################################
############################################################################################
# Lazily generate (via `@yield`) all candidate split decisions for the given
# instances and their current world sets `Sf`: propositional, global, and
# modal decisions, according to the corresponding `allow_*` flags.
Base.@propagate_inbounds @resumable function generate_decisions(
    X::AbstractScalarLogiset{W,U},
    i_instances::AbstractVector{<:Integer},
    Sf::AbstractVector{<:AbstractWorlds{W}},
    allow_propositional_decisions::Bool,
    allow_modal_decisions::Bool,
    allow_global_decisions::Bool,
    modal_relations_inds::AbstractVector,
    features_inds::AbstractVector,
    grouped_featsaggrsnops::AbstractVector{<:AbstractDict{<:Aggregator,<:AbstractVector{<:ScalarMetaCondition}}},
    grouped_featsnaggrs::AbstractVector{<:AbstractVector{Tuple{<:Integer,<:Aggregator}}},
) where {W<:AbstractWorld,U}
    # Propositional splits
    if allow_propositional_decisions
        for decision in generate_propositional_decisions(X, i_instances, Sf, features_inds, grouped_featsaggrsnops, grouped_featsnaggrs)
            # @logmsg LogDebug " Testing decision: $(displaydecision(decision))"
            @yield decision
        end
    end
    # Global splits
    if allow_global_decisions
        for decision in generate_global_decisions(X, i_instances, Sf, features_inds, grouped_featsaggrsnops, grouped_featsnaggrs)
            # @logmsg LogDebug " Testing decision: $(displaydecision(decision))"
            @yield decision
        end
    end
    # Modal splits
    if allow_modal_decisions
        for decision in generate_modal_decisions(X, i_instances, Sf, modal_relations_inds, features_inds, grouped_featsaggrsnops, grouped_featsnaggrs)
            # @logmsg LogDebug " Testing decision: $(displaydecision(decision))"
            @yield decision
        end
    end
end
using StatsBase
"""
    countmap_with_domain(v, keyset = unique(v), T = Float64)

Count the occurrences of each element of `v`, returning a `Dict` whose values
have type `T`, and which contains an entry (possibly `zero(T)`) for *every*
key in `keyset` — unlike a plain countmap, keys absent from `v` are included.

Note: the third parameter was previously named `eltype`, shadowing
`Base.eltype`; it is now `T`. The counting is done in a single pass with a
typed `Dict`, instead of counting into an `Int` dict and rebuilding it.
"""
function countmap_with_domain(v::AbstractVector, keyset::AbstractVector = unique(v), T = Float64)
    res = Dict{eltype(v),T}()
    for x in v
        res[x] = get(res, x, zero(T)) + one(T)
    end
    # Ensure every key of the requested domain is present.
    for el in keyset
        get!(res, el, zero(T))
    end
    return res
end
"""
References:
- "Generalizing Boundary Points"
- "Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning"
"""
function limit_threshold_domain(
aggr_thresholds::AbstractVector{T},
Y::AbstractVector{L},
W::AbstractVector{U},
loss_function::Loss,
test_op::TestOperator,
min_samples_leaf::Integer,
perform_domain_optimization::Bool;
n_classes::Union{Nothing,Integer} = nothing,
nc::Union{Nothing,AbstractVector{U}} = nothing,
nt::Union{Nothing,U} = nothing,
) where {T,L<:_Label,U}
if allequal(aggr_thresholds) # Always zero entropy
return T[], Nothing[]
end
if loss_function isa ShannonEntropy && test_op in [≥, <, ≤, >] && (W isa Ones) # TODO extendo to allequal(W) # TODO extend to Gini Index, Normalized Distance Measure, Info Gain, Gain Ratio (Ref. [Linear-Time Preprocessing in Optimal Numerical Range Partitioning])
if !perform_domain_optimization
thresh_domain = unique(aggr_thresholds)
thresh_domain = begin
if test_op in [≥, <] # Remove edge-case with zero entropy
_m = minimum(thresh_domain)
filter(x->x != _m, thresh_domain)
elseif test_op in [≤, >] # Remove edge-case with zero entropy
_m = maximum(thresh_domain)
filter(x->x != _m, thresh_domain)
else
thresh_domain
end
end
return thresh_domain, fill(nothing, length(thresh_domain))
else
p = sortperm(aggr_thresholds)
_ninstances = length(Y)
_aggr_thresholds = aggr_thresholds[p]
_Y = Y[p]
# thresh_domain = unique(_aggr_thresholds)
# sort!(thresh_domain)
ps = pairs(SoleBase._groupby(first, zip(_aggr_thresholds, _Y) |> collect))
groupedY = map(((k,v),)->begin
Ys = map(last, v)
# footprint = sort((Ys)) # associated with ==, it works
# footprint = sort(unique(Ys)) # couldn't get it to work.
# footprint = countmap(Ys; alg = :dict)
footprint = countmap(Ys);
footprint = collect(footprint); # footprint = map(((k,c),)->k=>c/sum(values(footprint)), collect(footprint)); # normalized
sort!(footprint; by = first)
k => (Ys, footprint)
end, collect(ps))
# groupedY = map(((k,v),)->(k=>sort(map(last, v))), collect(ps))
# groupedY = map(((k,v),)->(k=>sort(unique(map(last, v)))), collect(ps))
function is_same_footprint(f1, f2)
if f1 == f2
return true
end
norm_f1 = map(((k,c),)->k=>c/sum(last.(f1)), f1)
norm_f2 = map(((k,c),)->k=>c/sum(last.(f2)), f2)
return norm_f1 == norm_f2
end
if test_op in [≥, <]
sort!(groupedY; by=first, rev = true)
elseif test_op in [≤, >]
sort!(groupedY; by=first)
else
error("Unexpected test_op: $(test_op)")
end
thresh_domain, _thresh_Ys, _thresh_footprint = first.(groupedY), first.(last.(groupedY)), last.(last.(groupedY))
# Filter out those that do not comply with min_samples_leaf
n_left = 0
is_boundary_point = map(__thresh_Ys->begin
n_left = n_left + length(__thresh_Ys)
((n_left >= min_samples_leaf && _ninstances-n_left >= min_samples_leaf))
end, _thresh_Ys)
# Reference: ≤
is_boundary_point = map(((i, honors_min_samples_leaf),)->begin
# last = (i == length(is_boundary_point))
(
(honors_min_samples_leaf &&
# (!last &&
!(is_boundary_point[i+1] && is_same_footprint(_thresh_footprint[i], _thresh_footprint[i+1])))
# !(is_boundary_point[i+1] && isapprox(_thresh_footprint[i], _thresh_footprint[i+1]))) # TODO better..?
# !(is_boundary_point[i+1] && issubset(_thresh_footprint[i+1], _thresh_footprint[i]))) # Probably doesn't work
# !(is_boundary_point[i+1] && issubset(_thresh_footprint[i], _thresh_footprint[i+1]))) # Probably doesn't work
# true)
)
end, enumerate(is_boundary_point))
thresh_domain = thresh_domain[is_boundary_point]
# NOTE: pretending that these are the right counts, when they are actually the left counts!!! It doesn't matter, it's symmetric.
# cur_left_counts = countmap_with_domain(L[], UnitRange{L}(1:n_classes), U)
cur_left_counts = fill(zero(U), n_classes)
additional_info = map(Ys->begin
# addcounts!(cur_left_counts, Ys)
# f = collect(values(cur_left_counts))
# weight = first(W) # when allequal(W)
weight = one(U)
[cur_left_counts[y] += weight for y in Ys]
f = cur_left_counts
if test_op in [≥, ≤]
# These are left counts
ncl, nl = copy(f), sum(f)
# ncr = Vector{U}(undef, n_classes)
# ncr .= nc .- ncl
ncr = nc .- ncl
nr = nt - nl
else
# These are right counts
ncr, nr = copy(f), sum(f)
# ncl = Vector{U}(undef, n_classes)
# ncl .= nc .- ncr
ncl = nc .- ncr
nl = nt - nr
end
threshold_info = (ncr, nr, ncl, nl)
threshold_info
end, _thresh_Ys)[is_boundary_point]
# @show typeof(additional_info)
# @show typeof(additional_info[1])
# @show test_op, min_samples_leaf
# @show groupedY
# @show sum(is_boundary_point), length(thresh_domain), sum(is_boundary_point)/length(thresh_domain)
return thresh_domain, additional_info
end
else
thresh_domain = unique(aggr_thresholds)
return thresh_domain, fill(nothing, length(thresh_domain))
end
end
# function limit_threshold_domain(loss_function::Loss, aggr_thresholds::AbstractVector{U}) where {U}
# # @show aggr_thresholds
# thresh_domain = begin
# if U <: Bool
# unique(aggr_thresholds)
# else
# setdiff(Set(aggr_thresholds),Set([typemin(U), typemax(U)]))
# end
# end
# # @show thresh_domain
# return thresh_domain
# end
############################################################################################
# Lazily generate (via `@yield`) the candidate *propositional* decisions,
# i.e., those based on the identity relation. For each selected feature,
# per-instance thresholds are obtained by aggregating the feature values over
# each instance's current worlds `Sf`; one tuple
# `(relation, metacondition, test_op, aggr_thresholds)` is yielded per
# (feature, metacondition) pair.
Base.@propagate_inbounds @resumable function generate_propositional_decisions(
    X::AbstractScalarLogiset{W,U,FT,FR},
    i_instances::AbstractVector{<:Integer},
    Sf::AbstractVector{<:AbstractWorlds{W}},
    features_inds::AbstractVector,
    grouped_featsaggrsnops::AbstractVector{<:AbstractDict{<:Aggregator,<:AbstractVector{<:ScalarMetaCondition}}},
    grouped_featsnaggrs::AbstractVector{<:AbstractVector{Tuple{<:Integer,<:Aggregator}}},
) where {W<:AbstractWorld,U,FT<:AbstractFeature,N,FR<:FullDimensionalFrame{N,W}}
    relation = identityrel
    _ninstances = length(i_instances)

    _features = features(X)

    # For each feature
    @inbounds for i_feature in features_inds
        feature = _features[i_feature]
        @logmsg LogDebug "Feature $(i_feature): $(feature)"

        # operators for each aggregator
        aggrsnops = grouped_featsaggrsnops[i_feature]
        # Vector of aggregators
        aggregators = keys(aggrsnops) # Note: order-variant, but that's ok here

        # dict->vector
        # aggrsnops = [aggrsnops[i_aggregator] for i_aggregator in aggregators]

        # Initialize thresholds with the bottoms
        thresholds = Array{U,2}(undef, length(aggregators), _ninstances)
        for (i_aggregator,aggregator) in enumerate(aggregators)
            thresholds[i_aggregator,:] .= aggregator_bottom(aggregator, U)
        end

        # For each instance, compute thresholds by applying each aggregator to the set of existing values (from the worldset)
        for (instance_idx,i_instance) in enumerate(i_instances)
            # @logmsg LogDetail " Instance $(instance_idx)/$(_ninstances)"
            worlds = Sf[instance_idx]

            # TODO also try this instead
            # values = [X[i_instance, w, i_feature] for w in worlds]
            # thresholds[:,instance_idx] = map(aggregator->aggregator(values), aggregators)

            for w in worlds
                # gamma = featvalue(feature, X[i_instance, w) # TODO in general!
                gamma = featvalue(feature, X, i_instance, w, i_feature)
                for (i_aggregator,aggregator) in enumerate(aggregators)
                    # Fold each value into the per-aggregator running threshold.
                    thresholds[i_aggregator,instance_idx] = SoleData.aggregator_to_binary(aggregator)(gamma, thresholds[i_aggregator,instance_idx])
                end
            end
        end

        # tested_metacondition = TestOperator[]
        # @logmsg LogDebug "thresholds: " thresholds

        # For each aggregator
        for (i_aggregator,aggregator) in enumerate(aggregators)
            aggr_thresholds = thresholds[i_aggregator,:]

            for metacondition in aggrsnops[aggregator]
                test_op = SoleData.test_operator(metacondition)
                @yield relation, metacondition, test_op, aggr_thresholds
            end # for metacondition
        end # for aggregator
    end # for feature
end
############################################################################################
"""
    generate_modal_decisions(X, i_instances, Sf, modal_relations_inds, features_inds,
                             grouped_featsaggrsnops, grouped_featsnaggrs)

Resumable generator of candidate modal split decisions. For every pair
(relation, feature), one threshold per aggregator is computed for each instance by
folding the one-step aggregations of the feature over the instance's current world
set `Sf`; then one `(relation, metacondition, test_op, aggr_thresholds)` tuple is
yielded per metacondition associated with each aggregator.
"""
Base.@propagate_inbounds @resumable function generate_modal_decisions(
    X::AbstractScalarLogiset{W,U,FT,FR},
    i_instances::AbstractVector{<:Integer},
    Sf::AbstractVector{<:AbstractWorlds{W}},
    modal_relations_inds::AbstractVector,
    features_inds::AbstractVector,
    grouped_featsaggrsnops::AbstractVector{<:AbstractDict{<:Aggregator,<:AbstractVector{<:ScalarMetaCondition}}},
    grouped_featsnaggrs::AbstractVector{<:AbstractVector{Tuple{<:Integer,<:Aggregator}}},
) where {W<:AbstractWorld,U,FT<:AbstractFeature,N,FR<:FullDimensionalFrame{N,W}}
    _ninstances = length(i_instances)
    _relations = relations(X)
    _features = features(X)
    # For each relational connective
    for i_relation in modal_relations_inds
        relation = _relations[i_relation]
        @logmsg LogDebug "Relation $(relation)..."
        # For each feature
        for i_feature in features_inds
            feature = _features[i_feature]
            # @logmsg LogDebug "Feature $(i_feature): $(feature)"
            # Metaconditions (test operators) grouped by aggregator
            aggrsnops = grouped_featsaggrsnops[i_feature]
            # Vector of (metacondition id, aggregator) pairs for this feature
            aggregators_with_ids = grouped_featsnaggrs[i_feature]
            # Initialize thresholds with the bottom element of each aggregator
            thresholds = Array{U,2}(undef, length(aggregators_with_ids), _ninstances)
            for (i_aggregator,(_,aggregator)) in enumerate(aggregators_with_ids)
                thresholds[i_aggregator,:] .= aggregator_bottom(aggregator, U)
            end
            # For each instance, compute thresholds by applying each aggregator
            # to the set of existing values (from the worldset)
            for (instance_id,i_instance) in enumerate(i_instances)
                # @logmsg LogDetail " Instance $(instance_id)/$(_ninstances)"
                worlds = Sf[instance_id]
                # Hoisted: the feature channel is the same for all worlds/aggregators.
                _featchannel = featchannel(base(X), i_instance, i_feature)
                for (i_aggregator,(i_metacond,aggregator)) in enumerate(aggregators_with_ids)
                    metacondition = metaconditions(X)[i_metacond]
                    for w in worlds
                        gamma = begin
                            if true
                                # featchannel_onestep_aggregation(X, _featchannel, i_instance, w, relation, feature(metacondition), aggregator)
                                featchannel_onestep_aggregation(X, _featchannel, i_instance, w, relation, metacondition, i_metacond, i_relation)
                                # onestep_aggregation(X, i_instance, w, relation, feature, aggregator, i_metacond, i_relation)
                            # elseif X isa UniformFullDimensionalLogiset
                            #     onestep_aggregation(X, i_instance, w, relation, feature, aggregator, i_metacond, i_relation)
                            else
                                # Fixed: this error message used to (wrongly) mention generate_global_decisions.
                                error("generate_modal_decisions is broken.")
                            end
                        end
                        # Fold the new gamma into the running threshold for this aggregator
                        thresholds[i_aggregator,instance_id] = SoleData.aggregator_to_binary(aggregator)(gamma, thresholds[i_aggregator,instance_id])
                    end
                end
                # for (i_aggregator,(i_metacond,aggregator)) in enumerate(aggregators_with_ids)
                #     gammas = [onestep_aggregation(X, i_instance, w, relation, feature, aggregator, i_metacond, i_relation) for w in worlds]
                #     thresholds[i_aggregator,instance_id] = aggregator(gammas)
                # end
            end
            # @logmsg LogDebug "thresholds: " thresholds
            # For each aggregator, yield one decision per associated metacondition
            for (i_aggregator,(_,aggregator)) in enumerate(aggregators_with_ids)
                aggr_thresholds = thresholds[i_aggregator,:]
                for metacondition in aggrsnops[aggregator]
                    # @logmsg LogDetail " Test operator $(metacondition)"
                    test_op = SoleData.test_operator(metacondition)
                    @yield relation, metacondition, test_op, aggr_thresholds
                end # for metacondition
            end # for aggregator
        end # for feature
    end # for relation
end
############################################################################################
"""
    generate_global_decisions(X, i_instances, Sf, features_inds,
                              grouped_featsaggrsnops, grouped_featsnaggrs)

Resumable generator of candidate *global* split decisions (relation fixed to
`globalrel`). For each feature, one threshold per aggregator is computed per
instance via a global one-step aggregation; one
`(relation, metacondition, test_op, aggr_thresholds)` tuple is then yielded per
metacondition associated with each aggregator.
"""
Base.@propagate_inbounds @resumable function generate_global_decisions(
    X::AbstractScalarLogiset{W,U,FT,FR},
    i_instances::AbstractVector{<:Integer},
    Sf::AbstractVector{<:AbstractWorlds{W}},
    features_inds::AbstractVector,
    grouped_featsaggrsnops::AbstractVector{<:AbstractDict{<:Aggregator,<:AbstractVector{<:ScalarMetaCondition}}},
    grouped_featsnaggrs::AbstractVector{<:AbstractVector{Tuple{<:Integer,<:Aggregator}}},
) where {W<:AbstractWorld,U,FT<:AbstractFeature,N,FR<:FullDimensionalFrame{N,W}}
    # Global decisions always use the global relation.
    relation = globalrel
    _ninstances = length(i_instances)
    _features = features(X)
    # For each feature
    for i_feature in features_inds
        feature = _features[i_feature]
        @logmsg LogDebug "Feature $(i_feature): $(feature)"
        # Metaconditions (test operators) grouped by aggregator
        aggrsnops = grouped_featsaggrsnops[i_feature]
        # println(aggrsnops)
        # Vector of (metacondition id, aggregator) pairs for this feature
        aggregators_with_ids = grouped_featsnaggrs[i_feature]
        # println(aggregators_with_ids)
        # @show feature
        # @show aggrsnops
        # @show aggregators_with_ids
        # dict->vector
        # aggrsnops = [aggrsnops[i_aggregator] for i_aggregator in aggregators]
        # # TODO use this optimized version for SupportedLogiset:
        # thresholds can in fact be directly given by slicing globmemoset and permuting the two dimensions
        # aggregators_ids = fst.(aggregators_with_ids)
        # thresholds = transpose(globmemoset(X)[i_instances, aggregators_ids])
        # Initialize thresholds with the bottoms
        # @show U
        # Note: no bottom-initialization is needed here, since each entry is
        # assigned exactly once (one global gamma per instance/aggregator).
        thresholds = Array{U,2}(undef, length(aggregators_with_ids), _ninstances)
        # for (i_aggregator,(_,aggregator)) in enumerate(aggregators_with_ids)
        #     thresholds[i_aggregator,:] .= aggregator_bottom(aggregator, U)
        # end
        # For each instance, compute thresholds by applying each aggregator to the set of existing values (from the worldset)
        for (instance_id,i_instance) in enumerate(i_instances)
            # @logmsg LogDetail " Instance $(instance_id)/$(_ninstances)"
            _featchannel = featchannel(base(X), i_instance, i_feature)
            for (i_aggregator,(i_metacond,aggregator)) in enumerate(aggregators_with_ids)
                # TODO delegate this job to different flavors of `get_global_gamma`. Test whether the _featchannel assignment outside is faster!
                metacondition = metaconditions(X)[i_metacond]
                gamma = begin
                    if true
                        # _featchannel = featchannel(base(X), i_instance, i_feature)
                        # NOTE(review): the world argument is a placeholder (emptyworld),
                        # since the global relation ignores the current world — confirm.
                        featchannel_onestep_aggregation(X, _featchannel, i_instance, SoleLogics.emptyworld(frame(X, i_instance)), relation, metacondition, i_metacond)
                        # onestep_aggregation(X, i_instance, dummyworldTODO, relation, feature, aggregator, i_metacond)
                    # elseif X isa UniformFullDimensionalLogiset
                    #     onestep_aggregation(X, i_instance, dummyworldTODO, relation, feature, aggregator, i_metacond)
                    else
                        error("generate_global_decisions is broken.")
                    end
                end
                # @show gamma
                thresholds[i_aggregator,instance_id] = gamma
                # thresholds[i_aggregator,instance_id] = SoleData.aggregator_to_binary(aggregator)(gamma, thresholds[i_aggregator,instance_id])
                # println(gamma)
                # println(thresholds[i_aggregator,instance_id])
            end
        end
        # println(thresholds)
        @logmsg LogDetail "thresholds: " thresholds
        # For each aggregator, yield one decision per associated metacondition
        for (i_aggregator,(_,aggregator)) in enumerate(aggregators_with_ids)
            # println(aggregator)
            # @show aggregator
            aggr_thresholds = thresholds[i_aggregator,:]
            for metacondition in aggrsnops[aggregator]
                test_op = SoleData.test_operator(metacondition)
                @yield relation, metacondition, test_op, aggr_thresholds
            end # for metacondition
        end # for aggregator
    end # for feature
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5856 | # TODO remove and use SoleModels.accuracy, SoleModels.mae, SoleModels.mse
# Accuracy: fraction of positions where prediction equals ground truth.
function _acc(y_pred, y_true)
    @assert length(y_pred) == length(y_true)
    n = length(y_pred)
    return count(p == t for (p, t) in zip(y_pred, y_true)) / n
end
# Mean absolute error between predictions and ground truths.
function _mae(y_pred, y_true)
    @assert length(y_pred) == length(y_true)
    return sum(abs(t - p) for (p, t) in zip(y_pred, y_true)) / length(y_true)
end
# Mean squared error between predictions and ground truths.
function _mse(y_pred, y_true)
    @assert length(y_pred) == length(y_true)
    return sum(abs2(t - p) for (p, t) in zip(y_pred, y_true)) / length(y_true)
end
"""
    leafperformance(leaf::AbstractDecisionLeaf)

Performance of a leaf on its supporting instances: accuracy for classification
labels (`CLabel`), mean squared error for regression labels (`RLabel`).
"""
function leafperformance(leaf::AbstractDecisionLeaf{L}) where {L}
    _gts = supp_labels(leaf)
    # The leaf predicts a single constant label for all of its instances.
    _preds = fill(prediction(leaf), length(_gts))
    if L <: CLabel
        _acc(_gts, _preds)
    elseif L <: RLabel
        _mse(_gts, _preds)
    else
        error("Could not compute leafperformance with unknown label type: $(L).")
    end
end
"""
    get_metrics(leaf::AbstractDecisionLeaf{<:CLabel}; n_tot_inst = nothing,
                rel_confidence_class_counts = nothing, train_or_valid = true, silent = false)

Compute classification metrics for a decision leaf, returned as a `NamedTuple`
(confidence, counts, and — when the relevant context is provided — support, lift,
conviction and sensitivity share). Metrics requiring `n_tot_inst` or
`rel_confidence_class_counts` are only included when those are given.
"""
function get_metrics(
    leaf::AbstractDecisionLeaf{<:CLabel};
    n_tot_inst = nothing,
    rel_confidence_class_counts = nothing,
    train_or_valid = true,
    silent = false,
)
    metrics = (;)
    supporting_labels      = supp_labels(leaf; train_or_valid = train_or_valid)
    supporting_predictions = predictions(leaf; train_or_valid = train_or_valid)
    ############################################################################
    # Confidence, # of supporting labels, # of correctly classified instances
    n_inst = length(supporting_labels)
    n_correct = sum(supporting_labels .== supporting_predictions)
    confidence = n_correct/n_inst
    metrics = merge(metrics, (
        n_inst = n_inst,
        n_correct = n_correct,
        confidence = confidence,
    ))
    ############################################################################
    # Total # of instances
    if !isnothing(rel_confidence_class_counts)
        if !isnothing(n_tot_inst)
            @assert n_tot_inst == sum(values(rel_confidence_class_counts)) "n_tot_inst != sum(values(rel_confidence_class_counts)): $(n_tot_inst) $(sum(values(rel_confidence_class_counts))) sum($(values(rel_confidence_class_counts)))"
        else
            n_tot_inst = sum(values(rel_confidence_class_counts))
        end
        metrics = merge(metrics, (
            n_tot_inst = n_tot_inst,
        ))
    end
    ############################################################################
    # Lift, class support and others
    if !isnothing(rel_confidence_class_counts)
        # Class counts restricted to this leaf, with explicit zeros for absent classes.
        cur_class_counts = begin
            cur_class_counts = countmap(supporting_labels)
            for class in keys(rel_confidence_class_counts)
                if !haskey(cur_class_counts, class)
                    cur_class_counts[class] = 0
                end
            end
            cur_class_counts
        end
        rel_tot_inst = sum([cur_class_counts[class]/rel_confidence_class_counts[class] for class in keys(rel_confidence_class_counts)])
        # TODO can't remember the rationale behind this?
        # if isa(leaf, DTLeaf)
        #     rel_conf = (cur_class_counts[prediction(leaf)]/get(rel_confidence_class_counts, prediction(leaf), 0))/rel_tot_inst
        # end
        metrics = merge(metrics, (
            cur_class_counts = cur_class_counts,
            rel_tot_inst = rel_tot_inst,
            # rel_conf = rel_conf,
        ))
        if !isnothing(n_tot_inst) && isa(leaf, DTLeaf)
            class_support = get(rel_confidence_class_counts, prediction(leaf), 0)/n_tot_inst
            lift = confidence/class_support
            metrics = merge(metrics, (
                class_support = class_support,
                lift = lift,
            ))
        end
    end
    ############################################################################
    # Support
    if !isnothing(n_tot_inst)
        support = n_inst/n_tot_inst
        metrics = merge(metrics, (
            support = support,
        ))
    end
    ############################################################################
    # Conviction
    # Fixed: `class_support` is only defined for `DTLeaf` leaves (see above), so the
    # same check must guard its use here; previously this branch could raise
    # UndefVarError for non-DTLeaf leaves.
    if !isnothing(rel_confidence_class_counts) && !isnothing(n_tot_inst) && isa(leaf, DTLeaf)
        conviction = (1-class_support)/(1-confidence)
        metrics = merge(metrics, (
            conviction = conviction,
        ))
    end
    ############################################################################
    # Sensitivity share: the portion of "responsibility" for the correct classification of class L
    if !isnothing(rel_confidence_class_counts) && isa(leaf, DTLeaf)
        sensitivity_share = n_correct/get(rel_confidence_class_counts, prediction(leaf), 0)
        metrics = merge(metrics, (
            sensitivity_share = sensitivity_share,
        ))
    end
    metrics
end
"""
    get_metrics(leaf::AbstractDecisionLeaf{<:RLabel}; n_tot_inst = nothing,
                rel_confidence_class_counts = nothing, train_or_valid = true, silent = false)

Compute regression metrics for a decision leaf (`n_inst`, `mae`, `mse`, `rmse`,
`var`, plus `support` when `n_tot_inst` is provided), returned as a `NamedTuple`.
"""
function get_metrics(
    leaf::AbstractDecisionLeaf{<:RLabel};
    n_tot_inst = nothing,
    rel_confidence_class_counts = nothing,
    train_or_valid = true,
    silent = false,
)
    # Class counts only make sense for classification leaves.
    @assert isnothing(rel_confidence_class_counts)
    metrics = (;)
    supporting_labels      = supp_labels(leaf; train_or_valid = train_or_valid)
    supporting_predictions = predictions(leaf; train_or_valid = train_or_valid)
    n_inst = length(supporting_labels)
    mae = _mae(supporting_labels, supporting_predictions)
    mse = _mse(supporting_labels, supporting_predictions)
    # sum(abs.(supporting_labels .- supporting_predictions)) / n_inst
    rmse = StatsBase.rmsd(supporting_labels, supporting_predictions)
    # Variance of the ground-truth labels at this leaf.
    var = StatsBase.var(supporting_labels)
    metrics = merge(metrics, (
        n_inst = n_inst,
        mae = mae,
        mse = mse,
        rmse = rmse,
        var = var,
    ))
    if !isnothing(n_tot_inst)
        support = n_inst/n_tot_inst
        metrics = merge(metrics, (
            support = support,
        ))
    end
    metrics
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6832 | abstract type Loss end
# Losses for classification tasks (categorical labels).
abstract type ClassificationLoss <: Loss end;
# Losses for regression tasks (numeric labels).
abstract type RegressionLoss <: Loss end;

# Default split loss: Shannon entropy for classification, variance for regression.
default_loss_function(::Type{<:CLabel}) = entropy
default_loss_function(::Type{<:RLabel}) = variance

# Fallback: whether `purity` is the best (top) value attainable for this loss.
# Specific losses may override this (e.g. ShannonEntropy checks `iszero`).
istoploss(::Loss, purity) = isinf(purity)
############################################################################################
# Loss functions for regression and classification
# These functions return the additive inverse of entropy measures
#
# For each measure, three versions are defined:
# - A single version, computing the loss for a single dataset
# - A combined version, computing the loss for a dataset split, equivalent to (ws_l*entropy_l + ws_r*entropy_r)
# - A final version, which corrects the loss and is only computed after the optimization step.
#
# Note: regression losses are defined in the weighted & unweighted versions
# TODO: write a loss based on gini index
############################################################################################
# Useful references:
# - Wang, Y., & Xia, S. T. (2017, March). Unifying variable splitting criteria of decision trees by Tsallis entropy. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (pp. 2507-2511). IEEE.
############################################################################################
# Classification: Shannon entropy
# (ps = normalize(ws, 1); return -sum(ps.*log.(ps)))
# Source: _shannon_entropy from https://github.com/bensadeghi/DecisionTree.jl/blob/master/src/util.jl, with inverted sign
# Classification loss based on Shannon entropy; as per the file convention,
# the returned value is the additive inverse of the entropy measure.
struct ShannonEntropy <: ClassificationLoss end

# For (inverted) Shannon entropy a pure node has loss exactly zero.
istoploss(::ShannonEntropy, purity) = iszero(purity)

# Single split side: `ws` are (non-negative) class weight counts, `t` their total.
# Computes acc/t - log(t), where acc = Σ k·log(k), i.e. the negated entropy.
Base.@propagate_inbounds @inline function (::ShannonEntropy)(ws::AbstractVector{U}, t::U) where {U<:Real}
    acc = 0.0
    @simd for w in ws
        if w > 0
            acc += w * log(w)
        end
    end
    return acc / t - log(t)
end

# Combined split: total-weighted sum of the per-side losses.
Base.@propagate_inbounds @inline function (ent::ShannonEntropy)(wss_n_ts::Tuple{AbstractVector{U},U}...) where {U<:Real}
    return mapreduce(((ws, t),) -> t * ent(ws, t), +, wss_n_ts)
end

# Correction step (identity for Shannon entropy).
Base.@propagate_inbounds @inline function (::ShannonEntropy)(e::AbstractFloat)
    return e
end
############################################################################################
# Classification: Shannon (second untested version)
# # Single
# Base.@propagate_inbounds @inline function _shannon_entropy(ws :: AbstractVector{U}, t :: U) where {U<:Real}
# log(t) + _shannon_entropy(ws) / t
# end
# Base.@propagate_inbounds @inline function _shannon_entropy(ws :: AbstractVector{U}) where {U<:Real}
# s = 0.0
# for k in filter((k)->k > 0, ws)
# s += k * log(k)
# end
# s
# end
# # Double
# Base.@propagate_inbounds @inline function _shannon_entropy(
# ws_l :: AbstractVector{U}, tl :: U,
# ws_r :: AbstractVector{U}, tr :: U,
# ) where {U<:Real}
# (tl * log(tl) + _shannon_entropy(ws_l) +
# tr * log(tr) + _shannon_entropy(ws_r))
# end
# # Correction
# Base.@propagate_inbounds @inline function _shannon_entropy(e :: AbstractFloat)
# e*log2(ℯ)
# end
############################################################################################
# Classification: Tsallis entropy
# (ps = normalize(ws, 1); return -log(sum(ps.^alpha))/(1.0-alpha)) with (alpha > 1.0)
# Single: loss for one weight vector `ws` with total `t`
# (`t` is unused here; kept for signature uniformity with the other losses).
# NOTE(review): log(Σ pᵅ) later scaled by 1/(α-1) matches the *Rényi* entropy
# definition rather than Tsallis — confirm naming against `_renyi_entropy` below.
Base.@propagate_inbounds @inline function _tsallis_entropy(alpha :: AbstractFloat, ws :: AbstractVector{U}, t :: U) where {U<:Real}
    ps = normalize(ws, 1) .^ alpha
    return log(sum(ps))
end
# Double: combined loss of a (left, right) split, weighted by the respective totals.
Base.@propagate_inbounds @inline function _tsallis_entropy(
    alpha :: AbstractFloat,
    ws_l :: AbstractVector{U}, tl :: U,
    ws_r :: AbstractVector{U}, tr :: U,
) where {U<:Real}
    return tl * _tsallis_entropy(alpha, ws_l, tl) + tr * _tsallis_entropy(alpha, ws_r, tr)
end
# Correction: rescale by 1/(α-1).
Base.@propagate_inbounds @inline function _tsallis_entropy(alpha :: AbstractFloat, e :: AbstractFloat)
    return e * inv(alpha - 1.0)
end

# Curried constructor: fixes `alpha` and forwards the remaining arguments.
TsallisEntropy(alpha::AbstractFloat) = (args...) -> _tsallis_entropy(alpha, args...)
############################################################################################
# Classification: Renyi entropy
# (ps = normalize(ws, 1); -(1.0-sum(ps.^alpha))/(alpha-1.0)) with (alpha > 1.0)
# Single: loss for one weight vector `ws` with total `t`
# (`t` is unused here; kept for signature uniformity with the other losses).
# NOTE(review): (Σ pᵅ - 1) scaled by 1/(α-1) matches the (negated) *Tsallis*
# entropy rather than Rényi — confirm naming against `_tsallis_entropy` above.
Base.@propagate_inbounds @inline function _renyi_entropy(alpha :: AbstractFloat, ws :: AbstractVector{U}, t :: U) where {U<:Real}
    ps = normalize(ws, 1) .^ alpha
    return sum(ps) - 1.0
end
# Double: combined loss of a (left, right) split, weighted by the respective totals.
Base.@propagate_inbounds @inline function _renyi_entropy(
    alpha :: AbstractFloat,
    ws_l :: AbstractVector{U}, tl :: U,
    ws_r :: AbstractVector{U}, tr :: U,
) where {U<:Real}
    return tl * _renyi_entropy(alpha, ws_l, tl) + tr * _renyi_entropy(alpha, ws_r, tr)
end
# Correction: rescale by 1/(α-1).
Base.@propagate_inbounds @inline function _renyi_entropy(alpha :: AbstractFloat, e :: AbstractFloat)
    return e * inv(alpha - 1.0)
end

# Curried constructor: fixes `alpha` and forwards the remaining arguments.
RenyiEntropy(alpha::AbstractFloat) = (args...) -> _renyi_entropy(alpha, args...)
############################################################################################
# Regression: Variance (weighted & unweighted, see https://en.wikipedia.org/wiki/Weighted_arithmetic_mean)
struct Variance <: RegressionLoss end

# Single (unweighted): `ns` are the labels, `s` their sum, `t` their count.
# Returns (Σ n² - s²/t) / (1 - t), i.e. the additive inverse of the sample variance.
Base.@propagate_inbounds @inline function (::Variance)(ns :: AbstractVector{L}, s :: L, t :: Integer) where {L}
    (sum(ns.^2)-s^2/t) / (1 - t)
    # TODO remove / (1 - t) from here, and move it to the correction-version of (::Variance), but it must be for single-version only!
end

# Single weighted (non-frequency weights interpretation): `ws` are the weights, `wt` their sum.
# NOTE(review): this returns the (positive) weighted population variance, while the
# unweighted version above returns a *negated* sample variance — confirm the intended
# sign convention is consistent at the call sites.
Base.@propagate_inbounds @inline function (::Variance)(ns :: AbstractVector{L}, ws :: AbstractVector{U}, wt :: U) where {L,U<:Real}
    (wns = ws .* ns; (sum(wns .* ns) - sum(wns)^2/wt)/wt)
end

# Double: combined loss of a (left, right) split, each side weighted by its total.
Base.@propagate_inbounds @inline function (::Variance)(
    ns_l :: AbstractVector{LU}, sl :: L, tl :: U,
    ns_r :: AbstractVector{LU}, sr :: L, tr :: U,
) where {L,LU<:Real,U<:Real}
    # Fixed: the right-hand term used `ns_l` instead of `ns_r`.
    ((tl*sum(ns_l.^2)-sl^2) / (1 - tl)) +
    ((tr*sum(ns_r.^2)-sr^2) / (1 - tr))
end

# Correction (identity for variance).
Base.@propagate_inbounds @inline function (::Variance)(e :: AbstractFloat)
    e
end
# TODO write double non weighted
############################################################################################
# The default classification loss is Shannon's entropy.
# Marked `const`: non-const globals are `Any`-typed and hurt inference.
const entropy = ShannonEntropy()
# The default regression loss is variance.
const variance = Variance()
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 20994 | ################################################################################
################################################################################
# TODO explain
################################################################################
################################################################################
export prune
using DataStructures
using SoleData.DimensionalDatasets: AbstractUnivariateFeature
"""
    prune(tree::DTree; kwargs...)

Prune a decision tree by pruning its root node (starting at depth 0) and
rebuilding the tree with the same world types and init conditions.
"""
function prune(tree::DTree; kwargs...)
    DTree(prune(root(tree); depth = 0, kwargs...), worldtypes(tree), initconditions(tree))
end

# Leaves cannot be pruned any further: return them unchanged.
function prune(leaf::AbstractDecisionLeaf; kwargs...)
    leaf
end
"""
    prune(node::DTInternal; depth = nothing, kwargs...)

Post-prune a decision-tree node according to the given pruning hyperparameters
(maximum depth, minimum leaf size, purity and performance constraints, optional
simplification). Returns either the pruned-away leaf (`this(node)`) or a new
`DTInternal` with recursively pruned children.
"""
function prune(node::DTInternal{L}; depth = nothing, kwargs...) where {L}
    @assert ! (haskey(kwargs, :max_depth) && isnothing(depth)) "Please specify the node depth: prune(node; depth = ...)"

    kwargs = NamedTuple(kwargs)
    # A `nothing` loss_function means "use the default": drop the key before merging.
    if haskey(kwargs, :loss_function) && isnothing(kwargs.loss_function)
        ks = filter((k)->k != :loss_function, collect(keys(kwargs)))
        kwargs = (; zip(ks, (kwargs[k] for k in ks))...)
    end

    pruning_params = merge((
        loss_function            = default_loss_function(L)        :: Union{Nothing,Loss},
        max_depth                = BOTTOM_MAX_DEPTH                :: Int,
        min_samples_leaf         = BOTTOM_MIN_SAMPLES_LEAF         :: Int,
        min_purity_increase      = BOTTOM_MIN_PURITY_INCREASE      :: AbstractFloat,
        max_purity_at_leaf       = BOTTOM_MAX_PURITY_AT_LEAF       :: AbstractFloat,
        max_performance_at_split = BOTTOM_MAX_PERFORMANCE_AT_SPLIT :: AbstractFloat,
        min_performance_at_split = BOTTOM_MIN_PERFORMANCE_AT_SPLIT :: AbstractFloat,
        simplify                 = false                           :: Bool,
    ), NamedTuple(kwargs))

    @assert all(map((x)->(isa(x, DTInternal) || isa(x, DTLeaf)), [this(node), left(node), right(node)]))

    # Honor constraints on the number of instances
    nt = length(supp_labels(this(node)))
    nl = length(supp_labels(left(node)))
    nr = length(supp_labels(right(node)))

    if (pruning_params.max_depth < depth) ||
       (pruning_params.min_samples_leaf > nr) ||
       (pruning_params.min_samples_leaf > nl)
        return this(node)
    end

    # Honor performance constraints
    performance = leafperformance(this(node))
    if (performance > pruning_params.max_performance_at_split ||
        performance < pruning_params.min_performance_at_split)
        return this(node)
    end

    # Helper: all leaf predictions in a subtree (fallback warns on unknown node types).
    function _allpredictions(n)
        @warn "Could not simplify tree with node of type $(typeof(n))"
    end
    _allpredictions(l::DTLeaf) = [prediction(l)]
    _allpredictions(n::DTInternal) = [_allpredictions(left(n))..., _allpredictions(right(n))...]
    # NOTE(review): `unique(...) == unique(...)` is order-sensitive; confirm whether
    # `issetequal` was intended here.
    if pruning_params.simplify && unique(_allpredictions(node)) == unique(_allpredictions(this(node)))
        return this(node)
    end

    # Honor purity constraints
    # TODO fix missing weights!!
    purity   = ModalDecisionTrees.compute_purity(supp_labels(this(node));  loss_function = pruning_params.loss_function)
    # Fixed: purity_l/purity_r were swapped (purity_r was computed on the *left*
    # child and vice versa), which corrupted the nl/nr weighting below.
    purity_l = ModalDecisionTrees.compute_purity(supp_labels(left(node));  loss_function = pruning_params.loss_function)
    purity_r = ModalDecisionTrees.compute_purity(supp_labels(right(node)); loss_function = pruning_params.loss_function)

    split_purity_times_nt = (nl * purity_l + nr * purity_r)

    if (purity_r > pruning_params.max_purity_at_leaf) ||
       (purity_l > pruning_params.max_purity_at_leaf) ||
       dishonor_min_purity_increase(L, pruning_params.min_purity_increase, purity, split_purity_times_nt, nt)
        return this(node)
    end

    DTInternal(
        i_modality(node),
        decision(node),
        this(node),
        prune(left(node);  pruning_params..., depth = depth+1),
        prune(right(node); pruning_params..., depth = depth+1)
    )
end
"""
    prune(forest::DForest, rng = Random.GLOBAL_RNG; kwargs...)

Prune a forest: optionally keep a random subsample of `ntrees` trees, then prune
each remaining tree with the remaining pruning parameters. Note: forest metrics
are lost in the process.
"""
function prune(forest::DForest{L}, rng::Random.AbstractRNG = Random.GLOBAL_RNG; kwargs...) where {L}
    pruning_params = merge((
        ntrees = BOTTOM_NTREES ::Integer ,
    ), NamedTuple(kwargs))

    # Remove trees (only when an explicit ntrees was requested)
    if pruning_params.ntrees != BOTTOM_NTREES
        perm = Random.randperm(rng, length(trees(forest)))[1:pruning_params.ntrees]
        forest = slice_forest(forest, perm)
    end
    # `ntrees` is not a per-tree pruning parameter: remove it before tree pruning.
    pruning_params = Base.structdiff(pruning_params, (;
        ntrees = nothing,
    ))

    # Prune trees
    # if parametrization_is_going_to_prune(pruning_params)
    v_trees = map((t)->prune(t; pruning_params...), trees(forest))
    # Note: metrics are lost
    forest = DForest{L}(v_trees)
    # end
    forest
end
# Build a new forest restricted to the trees selected by `perm`, carrying over
# the per-tree out-of-bag metrics when available.
function slice_forest(forest::DForest{L}, perm::AbstractVector{<:Integer}) where {L}
    # Note: the out-of-bag error cannot be recomputed for a sliced forest.
    sliced_trees = @views trees(forest)[perm]
    sliced_metrics = Dict()
    if haskey(metrics(forest), :oob_metrics)
        sliced_metrics[:oob_metrics] = @views metrics(forest).oob_metrics[perm]
    end
    return DForest{L}(sliced_trees, NamedTuple(sliced_metrics))
end
# When training many trees with different pruning parametrizations, it can be beneficial to find the non-dominated set of parametrizations,
# train a single tree per each non-dominated parametrization, and prune it afterwards x times. This hack can help save cpu time
"""
    nondominated_pruning_parametrizations(args; do_it_or_not = true, return_perm = false, ignore_additional_args = [])

Given a vector of model parametrizations (NamedTuples), compute the non-dominated
set with respect to the post-prunable hyperparameters: parametrizations differing
only in prunable parameters are collapsed onto a single, most conservative one, and
the original (post-pruning) parameter sets are attached to it. This lets one model
be trained per non-dominated parametrization and the others be derived by pruning.

Returns the non-dominated parametrizations (each paired with its post-pruning
parameter sets) and, if `return_perm`, a permutation mapping each original
parametrization to `(outer_idx, inner_idx)` in the result.
"""
function nondominated_pruning_parametrizations(
    args::AbstractVector;
    do_it_or_not = true,
    return_perm = false,
    ignore_additional_args = [],
)
    args = convert(Vector{NamedTuple}, args)
    nondominated_pars, perm =
        if do_it_or_not
            # To be optimized
            to_opt = [
                # tree & forest
                :max_depth,
                # :min_samples_leaf,
                :min_purity_increase,
                :max_purity_at_leaf,
                :max_performance_at_split,
                :min_performance_at_split,
                # forest
                :ntrees,
            ]
            # To be matched
            to_match = [
                :min_samples_leaf,
                :loss_function,
                :n_subrelations,
                :n_subfeatures,
                :initconditions,
                :allow_global_splits,
                :rng,
                :partial_sampling,
                :perform_consistency_check,
            ]
            # To be left so that they are used for pruning
            to_leave = [
                to_opt...,
                :loss_function,
                ignore_additional_args...,
            ]
            dominating = OrderedDict()
            # Reject parametrizations with keys outside the known parameter sets.
            overflowing_args = map((a)->setdiff(collect(keys(a)), [to_opt..., to_match..., to_leave...]), args)
            @assert all(length.(overflowing_args) .== 0) "Got unexpected model parameters: $(filter((a)->length(a) != 0, overflowing_args)) . In: $(args)."
            # Note: this optimization assumes that parameters are defaulted to their top (= most conservative) value
            polarity(::Val{:max_depth}) = max
            # polarity(::Val{:min_samples_leaf}) = min
            polarity(::Val{:min_purity_increase}) = min
            polarity(::Val{:max_purity_at_leaf}) = max
            polarity(::Val{:max_performance_at_split}) = max
            polarity(::Val{:min_performance_at_split}) = min
            polarity(::Val{:ntrees}) = max
            top(::Val{:max_depth}) = typemin(Int)
            # top(::Val{:min_samples_leaf}) = typemax(Int)
            top(::Val{:min_purity_increase}) = Inf
            top(::Val{:max_purity_at_leaf}) = -Inf
            top(::Val{:max_performance_at_split}) = -Inf
            top(::Val{:min_performance_at_split}) = Inf
            top(::Val{:ntrees}) = typemin(Int)
            perm = []
            # Find non-dominated parameter set
            for this_args in args
                # Key on the non-prunable ("base") parameters only.
                base_args = Base.structdiff(this_args, (;
                    max_depth = nothing,
                    # min_samples_leaf = nothing,
                    min_purity_increase = nothing,
                    max_purity_at_leaf = nothing,
                    max_performance_at_split = nothing,
                    min_performance_at_split = nothing,
                    ntrees = nothing,
                ))
                # Fold this parametrization into the dominating one for its base key,
                # taking the most conservative value per prunable parameter.
                dominating[base_args] = ((
                    max_depth                = polarity(Val(:max_depth               ))((haskey(this_args, :max_depth               ) ? this_args.max_depth                : top(Val(:max_depth               ))),(haskey(dominating, base_args) ? dominating[base_args][1].max_depth                : top(Val(:max_depth               )))),
                    # min_samples_leaf       = polarity(Val(:min_samples_leaf        ))((haskey(this_args, :min_samples_leaf        ) ? this_args.min_samples_leaf         : top(Val(:min_samples_leaf        ))),(haskey(dominating, base_args) ? dominating[base_args][1].min_samples_leaf         : top(Val(:min_samples_leaf        )))),
                    min_purity_increase      = polarity(Val(:min_purity_increase     ))((haskey(this_args, :min_purity_increase     ) ? this_args.min_purity_increase      : top(Val(:min_purity_increase     ))),(haskey(dominating, base_args) ? dominating[base_args][1].min_purity_increase      : top(Val(:min_purity_increase     )))),
                    max_purity_at_leaf       = polarity(Val(:max_purity_at_leaf      ))((haskey(this_args, :max_purity_at_leaf      ) ? this_args.max_purity_at_leaf       : top(Val(:max_purity_at_leaf      ))),(haskey(dominating, base_args) ? dominating[base_args][1].max_purity_at_leaf       : top(Val(:max_purity_at_leaf      )))),
                    max_performance_at_split = polarity(Val(:max_performance_at_split))((haskey(this_args, :max_performance_at_split) ? this_args.max_performance_at_split : top(Val(:max_performance_at_split))),(haskey(dominating, base_args) ? dominating[base_args][1].max_performance_at_split : top(Val(:max_performance_at_split)))),
                    min_performance_at_split = polarity(Val(:min_performance_at_split))((haskey(this_args, :min_performance_at_split) ? this_args.min_performance_at_split : top(Val(:min_performance_at_split))),(haskey(dominating, base_args) ? dominating[base_args][1].min_performance_at_split : top(Val(:min_performance_at_split)))),
                    ntrees                   = polarity(Val(:ntrees                  ))((haskey(this_args, :ntrees                  ) ? this_args.ntrees                   : top(Val(:ntrees                  ))),(haskey(dominating, base_args) ? dominating[base_args][1].ntrees                   : top(Val(:ntrees                  )))),
                ),[(haskey(dominating, base_args) ? dominating[base_args][2] : [])..., this_args])
                outer_idx = findfirst((k)->k==base_args, collect(keys(dominating)))
                inner_idx = length(dominating[base_args][2])
                push!(perm, (outer_idx, inner_idx))
            end
            [
                begin
                    # Drop prunable parameters that stayed at their top (default) value.
                    if (rep_args.max_depth                == top(Val(:max_depth))               ) rep_args = Base.structdiff(rep_args, (; max_depth = nothing)) end
                    # if (rep_args.min_samples_leaf       == top(Val(:min_samples_leaf))        ) rep_args = Base.structdiff(rep_args, (; min_samples_leaf = nothing)) end
                    if (rep_args.min_purity_increase      == top(Val(:min_purity_increase))     ) rep_args = Base.structdiff(rep_args, (; min_purity_increase = nothing)) end
                    if (rep_args.max_purity_at_leaf       == top(Val(:max_purity_at_leaf))      ) rep_args = Base.structdiff(rep_args, (; max_purity_at_leaf = nothing)) end
                    if (rep_args.max_performance_at_split == top(Val(:max_performance_at_split))) rep_args = Base.structdiff(rep_args, (; max_performance_at_split = nothing)) end
                    if (rep_args.min_performance_at_split == top(Val(:min_performance_at_split))) rep_args = Base.structdiff(rep_args, (; min_performance_at_split = nothing)) end
                    if (rep_args.ntrees                   == top(Val(:ntrees))                  ) rep_args = Base.structdiff(rep_args, (; ntrees = nothing)) end
                    this_args = merge(base_args, rep_args)
                    (this_args, [begin
                        ks = intersect(to_leave, keys(post_pruning_args))
                        (; zip(ks, [post_pruning_args[k] for k in ks])...)
                    end for post_pruning_args in post_pruning_argss])
                end for (i_model, (base_args,(rep_args, post_pruning_argss))) in enumerate(dominating)
            ], perm
        else
            zip(args, Iterators.repeated([(;)])) |> collect, zip(1:length(args), Iterators.repeated(1)) |> collect
        end

    if return_perm
        nondominated_pars, perm
    else
        nondominated_pars
    end
end
#
"""
    train_functional_leaves(tree::DTree, datasets, args...; kwargs...)

Replace each leaf of `tree` with a functional (trained) leaf: instances of each
dataset are routed down the tree (updating their world sets), and at each leaf a
model is trained via the `train_callback` keyword argument.
"""
function train_functional_leaves(
    tree::DTree,
    datasets::AbstractVector{<:Tuple{Any,AbstractVector}},
    args...;
    kwargs...,
)
    # World sets for (dataset, modality, instance)
    worlds = Vector{Vector{Vector{<:WST} where {WorldType<:AbstractWorld,WST<:Vector{WorldType}}}}([
        ModalDecisionTrees.initialworldsets(X, initconditions(tree))
    for (X,Y) in datasets])
    DTree(train_functional_leaves(root(tree), worlds, datasets, args...; kwargs...), worldtypes(tree), initconditions(tree))
end
# At internal nodes, a functional model is trained by calling a callback function, and the leaf is created
"""
    train_functional_leaves(node::DTInternal, worlds, datasets, args...; kwargs...)

Internal-node case: split every dataset (and the corresponding world sets) into
the instances satisfying/not satisfying the node's decision, then recurse on the
left (satisfied) and right (unsatisfied) children.
"""
function train_functional_leaves(
    node::DTInternal{L},
    worlds::AbstractVector{<:AbstractVector{<:AbstractVector{<:AbstractWorlds}}},
    datasets::AbstractVector{D},
    args...;
    kwargs...,
) where {L,D<:Tuple{Any,AbstractVector}}
    # Each dataset is sliced, and two subsets are derived (left and right)
    datasets_l = D[]
    datasets_r = D[]
    worlds_l = AbstractVector{<:AbstractVector{<:AbstractWorlds}}[]
    worlds_r = AbstractVector{<:AbstractVector{<:AbstractWorlds}}[]

    for (i_dataset,(X,Y)) in enumerate(datasets)
        satisfied_idxs   = Integer[]
        unsatisfied_idxs = Integer[]

        for i_instance in 1:ninstances(X)
            # Perform the modal step: check the decision and update the world set.
            (satisfied,new_worlds) =
                modalstep(
                    modality(X, i_modality(node)),
                    i_instance,
                    worlds[i_dataset][i_modality(node)][i_instance],
                    decision(node)
            )

            if satisfied
                push!(satisfied_idxs, i_instance)
            else
                push!(unsatisfied_idxs, i_instance)
            end

            worlds[i_dataset][i_modality(node)][i_instance] = new_worlds
        end

        push!(datasets_l, slicedataset((X,Y), satisfied_idxs;   allow_no_instances = true))
        push!(datasets_r, slicedataset((X,Y), unsatisfied_idxs; allow_no_instances = true))

        push!(worlds_l, [modality_worlds[satisfied_idxs]   for modality_worlds in worlds[i_dataset]])
        push!(worlds_r, [modality_worlds[unsatisfied_idxs] for modality_worlds in worlds[i_dataset]])
    end

    DTInternal(
        i_modality(node),
        decision(node),
        # train_functional_leaves(node.this, worlds, datasets, args...; kwargs...), # TODO test whether this makes sense and works correctly
        this(node),
        train_functional_leaves(left(node),  worlds_l, datasets_l, args...; kwargs...),
        train_functional_leaves(right(node), worlds_r, datasets_r, args...; kwargs...),
    )
end
# At leaves, a functional model is trained by calling a callback function, and the leaf is created
"""
    train_functional_leaves(leaf::AbstractDecisionLeaf, worlds, datasets; train_callback)

Leaf case: train a functional model (via `train_callback`) on the datasets routed
to this leaf, and wrap it in an `NSDTLeaf` together with its train/validation
labels and predictions. Currently expects exactly two datasets (train, valid).
"""
function train_functional_leaves(
    leaf::AbstractDecisionLeaf{L},
    worlds::AbstractVector{<:AbstractVector{<:AbstractVector{<:AbstractWorlds}}},
    datasets::AbstractVector{<:Tuple{Any,AbstractVector}};
    train_callback::Function,
) where {L<:Label}
    functional_model = train_callback(datasets)

    @assert length(datasets) == 2 "TODO expand code: $(length(datasets))"
    (train_X, train_Y), (valid_X, valid_Y) = datasets[1], datasets[2]

    # println("train_functional_leaves")
    # println(typeof(train_X))
    # println(hasmethod(size, (typeof(train_X),)) ? size(train_X) : nothing)
    # println(ninstances(train_X))
    # println(typeof(valid_X))
    # println(hasmethod(size, (typeof(valid_X),)) ? size(valid_X) : nothing)
    # println(ninstances(valid_X))

    supp_train_labels      = train_Y
    supp_valid_labels      = valid_Y
    supp_train_predictions = functional_model(train_X)
    supp_valid_predictions = functional_model(valid_X)

    # TODO avoid this definition, just return the model
    function get_predicting_function(model)
        return X->model(X)::Vector{L}
    end

    NSDTLeaf{L}(get_predicting_function(functional_model), supp_train_labels, supp_valid_labels, supp_train_predictions, supp_valid_predictions)
end
############################################################################################
############################################################################################
############################################################################################
# Base case: a leaf references no split variables, so it contributes an empty tally.
_variable_countmap(leaf::AbstractDecisionLeaf{L}; weighted = false) where {L<:Label} =
    Tuple{ModalityId,Int}[]
# Recursive case: tally this node's split variable (only when its feature is
# univariate), then recur into the left and right subtrees.
# NOTE(review): `length(supp_labels)` takes the length of the *function*
# `supp_labels`, not of this node's supporting labels — with `weighted = true`
# this looks like a bug (presumably `length(supp_labels(this(node)))` was
# intended); confirm before relying on weighted tallies.
function _variable_countmap(node::DTInternal{L}; weighted = false) where {L<:Label}
    th = begin
        d = decision(node)
        f = feature(d)
        (f isa AbstractUnivariateFeature) ?
            [((i_modality(node), f.i_variable), (weighted ? length(supp_labels) : 1)),] : []
    end
    return [
        th...,
        _variable_countmap(left(node); weighted = weighted)...,
        _variable_countmap(right(node); weighted = weighted)...
    ]
end
# Tally, over the whole tree, how often each (modality, variable) pair is used
# in a split; with `weighted = true` the tallies are normalized into weights.
function variable_countmap(tree::DTree{L}; weighted = false) where {L<:Label}
    found = _variable_countmap(root(tree); weighted = weighted)
    if weighted
        acc = Dict(k => 0 for k in unique(first.(found)))
        for (k, w) in found
            acc[k] += w
        end
        total = sum(values(acc))
        Dict(k => w/total for (k, w) in acc)
    else
        countmap(first.(found))
    end
end
# Forest-level analogue of `variable_countmap`: pools the tallies of all trees.
function variable_countmap(forest::DForest{L}; weighted = false) where {L<:Label}
    per_tree = [_variable_countmap(root(t); weighted = weighted) for t in trees(forest)]
    pooled = collect(Iterators.flatten(per_tree))
    if weighted
        acc = Dict(k => 0 for k in unique(first.(pooled)))
        for (k, w) in pooled
            acc[k] += w
        end
        total = sum(values(acc))
        Dict(k => w/total for (k, w) in acc)
    else
        countmap(first.(pooled))
    end
end
############################################################################################
############################################################################################
############################################################################################
"""
Squashes a vector of `DTNode`s into a single leaf using `bestguess`.
"""
function squashtoleaf(nodes::AbstractVector{<:DTNode})
squashtoleaf(map((n)->(n isa AbstractDecisionLeaf ? n : this(n)), nodes))
end
# Squash a vector of plain `DTLeaf`s into a single leaf pooling all of their
# supporting labels. All leaves must share the same prediction type.
# Throws an error when the pooled supporting labels are empty.
function squashtoleaf(leaves::AbstractVector{<:DTLeaf})
    # Compute the set of prediction types once (the original recomputed it
    # inside the assertion message).
    predtypes = unique(predictiontype.(leaves))
    @assert length(predtypes) == 1 "Cannot squash leaves " *
        "with different prediction " *
        "types: $(join(predtypes, ", "))"
    L = Union{predtypes...}
    labels = collect(Iterators.flatten(map(supp_labels, leaves)))
    if isempty(labels)
        error("Cannot squash to leaf with empty supporting labels.")
    end
    DTLeaf{L}(L.(labels))
end
# Squash a vector of `NSDTLeaf`s into a single functional leaf that always
# answers with the `bestguess` over all pooled labels and predictions.
function squashtoleaf(leaves::AbstractVector{<:NSDTLeaf})
    predtypes = unique(predictiontype.(leaves))
    @assert length(predtypes) == 1 "Cannot squash leaves " *
        "with different prediction " *
        "types: $(join(predtypes, ", "))"
    L = Union{predtypes...}
    supp_train_labels      = L.(collect(Iterators.flatten(map((leaf)->leaf.supp_train_labels, leaves))))
    supp_valid_labels      = L.(collect(Iterators.flatten(map((leaf)->leaf.supp_valid_labels, leaves))))
    supp_train_predictions = L.(collect(Iterators.flatten(map((leaf)->leaf.supp_train_predictions, leaves))))
    supp_valid_predictions = L.(collect(Iterators.flatten(map((leaf)->leaf.supp_valid_predictions, leaves))))
    supp_labels = [supp_train_labels..., supp_valid_labels..., supp_train_predictions..., supp_valid_predictions...]
    # The squashed leaf predicts the best guess over the pooled labels,
    # regardless of its input.
    predicting_function = (args...; kwargs...)->(bestguess(supp_labels))
    # Fix: this must build an `NSDTLeaf{L}` — the original called `DTLeaf{L}`
    # with the 5-argument `NSDTLeaf` signature (cf. the construction in
    # `train_functional_leaves`), which matches no `DTLeaf` constructor.
    NSDTLeaf{L}(
        predicting_function,
        supp_train_labels,
        supp_valid_labels,
        supp_train_predictions,
        supp_valid_predictions,
    )
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6908 | export printmodel
# export print_tree, print_forest <--- TODO remove
# print model
# Render `model` via `displaymodel` and print it to `io` (`stdout` by default).
printmodel(model::Union{DTNode,DTree,DForest,RootLevelNeuroSymbolicHybrid}; kwargs...) =
    printmodel(stdout, model; kwargs...)
printmodel(io::IO, model::Union{DTNode,DTree,DForest,RootLevelNeuroSymbolicHybrid}; kwargs...) =
    print(io, displaymodel(model; kwargs...))
# function printmodel(io::IO, model::Union{DTNode,DTree}; kwargs...)
# print_tree(io, model; kwargs...)
# end
# function printmodel(io::IO, model::DForest; kwargs...)
# print_forest(io, model; kwargs...)
# end
# function printmodel(io::IO, model::RootLevelNeuroSymbolicHybrid; kwargs...)
# print_rlnsdt(io, model; kwargs...)
# end
# function print_tree(tree::Union{DTNode,DTree}, args...; kwargs...)
# print_tree(stdout, tree, args...; kwargs...)
# end
# function print_forest(forest::DForest, args...; kwargs...)
# print_forest(stdout, forest, args...; kwargs...)
# end
# function print_rlnsdt(rlnsdt::RootLevelNeuroSymbolicHybrid, args...; kwargs...)
# print_rlnsdt(stdout, rlnsdt, args...; kwargs...)
# end
# function print_tree(io::IO, tree::Union{DTNode,DTree}, args...; kwargs...)
# print(io, displaymodel(tree; args..., kwargs...))
# end
# function print_forest(io::IO, forest::DForest, args...; kwargs...)
# print(io, displaymodel(forest; args..., kwargs...))
# end
# function print_rlnsdt(io::IO, rlnstd::RootLevelNeuroSymbolicHybrid, args...; kwargs...)
# print(io, displaymodel(rlnstd; args..., kwargs...))
# end
############################################################################################
# Compact textual form of a plain leaf: just its prediction.
displaybriefprediction(leaf::DTLeaf) = string(prediction(leaf))
# TODO move to SoleBase and rename to string_ellipsis
"""
    str_ellipsis(str, maxcharacters = 60)

Return `string(str)` unchanged when it has at most `maxcharacters` characters;
otherwise keep the first and last `maxcharacters ÷ 2` characters, joined by
`"..."`.
"""
function str_ellipsis(str, maxcharacters = 60)
    str = "$(str)"
    if length(str) <= maxcharacters
        str
    else
        # `first`/`last` count characters (not bytes), so this is safe for
        # non-ASCII strings — unlike the previous byte-indexed `str[1:n]`
        # slicing, which could throw `StringIndexError` on UTF-8 input.
        half = div(maxcharacters, 2)
        first(str, half) * "..." * last(str, half)
    end
end
# Compact textual form of a functional leaf: its predicting function,
# abbreviated with `str_ellipsis` and wrapped in angle brackets.
function displaybriefprediction(leaf::NSDTLeaf)
    "<" * str_ellipsis(string(leaf.predicting_function)) * ">"
end
# Format a leaf-metrics NamedTuple into a compact human-readable string:
# "8/10 (conf = 0.8000)" for classification (when `:n_correct` is present) or
# "5 (mae = 0.1234)" for regression. Only the metrics that are present are
# printed, in a fixed order.
function get_metrics_str(metrics::NamedTuple)
    # (key in `metrics`, printed label, formatter); the tuple order fixes the
    # output order. Replaces the original's eight copy-pasted `haskey`/`push!`
    # blocks.
    specs = (
        (:confidence,        "conf",              v->(@sprintf "%.4f" v)),
        (:lift,              "lift",              v->(@sprintf "%.2f" v)),
        (:support,           "supp",              v->(@sprintf "%.4f" v)),
        (:conviction,        "conv",              v->(@sprintf "%.4f" v)),
        (:sensitivity_share, "sensitivity_share", v->(@sprintf "%.4f" v)),
        (:var,               "var",               v->(@sprintf "%.4f" v)),
        (:mae,               "mae",               v->(@sprintf "%.4f" v)),
        (:rmse,              "rmse",              v->(@sprintf "%.4f" v)),
    )
    metrics_str = join(
        ["$(label) = $(fmt(metrics[key]))" for (key, label, fmt) in specs if haskey(metrics, key)],
        ", ",
    )
    if haskey(metrics, :n_correct) # Classification
        "$(metrics.n_correct)/$(metrics.n_inst) ($(metrics_str))"
    else # Regression
        "$(metrics.n_inst) ($(metrics_str))"
    end
end
# Displaying a tree delegates to displaying its root node.
displaymodel(tree::DTree; metrics_kwargs...) =
    displaymodel(root(tree); metrics_kwargs...)
# Display a forest as the numbered concatenation of its trees' displays.
function displaymodel(
    forest::DForest,
    args...;
    kwargs...,
)
    _ntrees = ntrees(forest)
    buf = IOBuffer()
    for (i_tree, tree) in enumerate(trees(forest))
        print(buf, "Tree $(i_tree) / $(_ntrees)\n")
        print(buf, displaymodel(tree, args...; kwargs...))
    end
    return String(take!(buf))
end
# Display a root-level neurosymbolic hybrid: its feature function, followed by
# the numbered displays of its trees.
function displaymodel(
    nsdt::RootLevelNeuroSymbolicHybrid,
    args...;
    kwargs...,
)
    outstr = ""
    # Fix: the trailing newline was missing, gluing the first tree header
    # ("Tree 1 / N") onto the feature-function line.
    outstr *= "Feature function: $(nsdt.feature_function)\n"
    _ntrees = ntrees(nsdt)
    for (i_tree,tree) in enumerate(nsdt.trees)
        outstr *= "Tree $(i_tree) / $(_ntrees)\n"
        outstr *= displaymodel(tree, args...; kwargs...)
    end
    return outstr
end
# Display a plain leaf as "<prediction> : <metrics>\n". The display-related
# keyword arguments are accepted for uniformity with the node method but are
# unused here; the remaining `kwargs` are forwarded to `get_metrics`.
function displaymodel(
    leaf::DTLeaf;
    indentation_str="",
    depth = 0,
    variable_names_map = nothing,
    max_depth = nothing,
    hidemodality = false,
    syntaxstring_kwargs = (;),
    kwargs...,
)
    return string(displaybriefprediction(leaf), " : ", get_metrics_str(get_metrics(leaf; kwargs...)), "\n")
end
# Display a functional leaf with separate metrics for the train and valid splits.
function displaymodel(
    leaf::NSDTLeaf;
    indentation_str="",
    depth = 0,
    variable_names_map = nothing,
    max_depth = nothing,
    hidemodality = false,
    syntaxstring_kwargs = (;),
    kwargs...,
)
    train_str = get_metrics_str(get_metrics(leaf; train_or_valid = true, kwargs...))
    valid_str = get_metrics_str(get_metrics(leaf; train_or_valid = false, kwargs...))
    return "$(displaybriefprediction(leaf)) : {TRAIN: $(train_str); VALID: $(valid_str)}\n"
end
# Display an internal node: its decision, the metrics of its `this` leaf, and,
# recursively, its left ("✔") and right ("✘") subtrees. Recursion stops at
# `max_depth`, printing "[...]" instead.
function displaymodel(
    node::DTInternal;
    indentation_str="",
    depth = 0,
    variable_names_map = nothing,
    max_depth = nothing,
    hidemodality = false,
    syntaxstring_kwargs = (;),
    # TODO print_rules = false,
    metrics_kwargs...,
)
    outstr = ""
    if isnothing(max_depth) || depth < max_depth
        dec_str = displaydecision(node; variable_names_map = variable_names_map, hidemodality = hidemodality, syntaxstring_kwargs...)
        outstr *= "$(rpad(dec_str, 59-(length(indentation_str) == 0 ? length(indentation_str)-1 : length(indentation_str)))) "
        outstr *= displaymodel(this(node); indentation_str = "", metrics_kwargs...)
        outstr *= indentation_str * "✔ " # "╭✔
        # Fix: both recursive calls used to pass `hidemodality = max_depth`
        # (the wrong value); `hidemodality` must be forwarded as-is.
        outstr *= displaymodel(left(node);
            indentation_str = indentation_str*"│",
            depth = depth+1,
            variable_names_map = variable_names_map,
            max_depth = max_depth,
            hidemodality = hidemodality,
            syntaxstring_kwargs = syntaxstring_kwargs,
            metrics_kwargs...,
        )
        outstr *= indentation_str * "✘ " # "╰✘
        outstr *= displaymodel(right(node);
            indentation_str = indentation_str*" ",
            depth = depth+1,
            variable_names_map = variable_names_map,
            max_depth = max_depth,
            hidemodality = hidemodality,
            syntaxstring_kwargs = syntaxstring_kwargs,
            metrics_kwargs...,
        )
    else
        depth != 0 && (outstr *= " ")
        outstr *= "[...]\n"
    end
    return outstr
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2150 |
# Classification: reject a split when the purity gain (best achievable purity
# minus the parent's purity) falls short of `min_purity_increase`.
function dishonor_min_purity_increase(::Type{L}, min_purity_increase, purity, best_purity_times_nt, nt) where {L<:CLabel}
    (best_purity_times_nt/nt - purity < min_purity_increase)
end
# Regression: as the classification method above, but with the threshold
# scaled by the number of instances `nt`.
# NOTE(review): the extra `* nt` factor (absent in the classification method)
# mirrors the original formulation kept in the comment below — confirm it is
# intentional and not a leftover from the unscaled form.
function dishonor_min_purity_increase(::Type{L}, min_purity_increase, purity, best_purity_times_nt, nt) where {L<:RLabel}
    # (best_purity_times_nt - tsum * label <= min_purity_increase * nt) # ORIGINAL
    (best_purity_times_nt/nt - purity < min_purity_increase * nt)
end
# TODO fix
# function _compute_purity( # faster_version assuming L<:Integer and labels going from 1:n_classes
# labels ::AbstractVector{L},
# n_classes ::Integer,
# weights ::AbstractVector{U} = default_weights(labels);
# loss_function ::Union{Nothing,Loss} = default_loss_function(L),
# ) where {L<:CLabel,L<:Integer,U}
# nc = fill(zero(U), n_classes)
# @simd for i in 1:max(length(labels),length(weights))
# nc[labels[i]] += weights[i]
# end
# nt = sum(nc)
# return loss_function(nc, nt)::Float64
# end
# Compute the purity (via `loss_function`) of a set of classification labels,
# accumulating the (possibly weighted) count of each class.
function compute_purity(
    labels    ::AbstractVector{L},
    weights   ::AbstractVector{U} = default_weights(labels);
    loss_function ::Union{Nothing,Loss} = default_loss_function(L),
) where {L<:CLabel,U}
    nc = Dict{L,U}()
    # `eachindex(labels, weights)` also checks that the two vectors agree in
    # shape; the previous `1:max(length(labels), length(weights))` bound could
    # read out of bounds whenever they differed. `zero(U)` (instead of the
    # `Int` literal `0`) keeps the accumulation type-stable for any weight
    # type. The old `@simd` annotation was dropped: it cannot vectorize a
    # Dict-mutating loop.
    for i in eachindex(labels, weights)
        nc[labels[i]] = get(nc, labels[i], zero(U)) + weights[i]
    end
    counts = collect(values(nc))
    nt = sum(counts)
    return loss_function(counts, nt)::Float64
end
# Regression purity: delegate to `loss_function` with the labels, the weights,
# and the total weight.
function _compute_purity(
    labels    ::AbstractVector{L},
    weights   ::AbstractVector{U} = default_weights(labels);
    loss_function ::Union{Nothing,Loss} = default_loss_function(L),
) where {L<:RLabel,U}
    total_weight = sum(weights)
    return loss_function(labels, weights, total_weight)::Float64
end
# Public regression entry point; forwards to `_compute_purity`.
compute_purity(
    labels    ::AbstractVector{L},
    weights   ::AbstractVector{U} = default_weights(labels);
    loss_function ::Union{Nothing,Loss} = default_loss_function(L),
) where {L<:RLabel,U} =
    _compute_purity(labels, weights; loss_function = loss_function)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
# partially written by Poom Chiarawongse <[email protected]>
# adapted from the Julia Base.Sort Library
#
# Hoare-style partition of `v[region]` around `pivot`, keyed by `w`: after the
# call, elements whose key is `<= pivot` precede those whose key is `> pivot`;
# the returned value is the (1-based, `w`-relative) index of the last element
# of the left part.
# NOTE(review): `v` is indexed through `region` (`r_start + i`) while `w` is
# indexed from 1 — this assumes `w` is a region-local key buffer of exactly
# `length(region)` entries; confirm at the call sites.
Base.@propagate_inbounds @inline function partition!(v::AbstractVector, w::AbstractVector{T}, pivot::T, region::UnitRange{<:Integer}) where T
    i, j = 1, length(region)
    r_start = region.start - 1
    @inbounds while true
        # Advance `i` past keys on the correct (left) side, retreat `j` past
        # keys on the correct (right) side, then swap the out-of-place pair.
        while i <= length(region) && w[i] <= pivot; i += 1; end;
        while j >= 1 && w[j] > pivot; j -= 1; end;
        i >= j && break
        ri = r_start + i
        rj = r_start + j
        v[ri], v[rj] = v[rj], v[ri]
        w[i], w[j] = w[j], w[i]
        i += 1; j -= 1
    end
    return j
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3838 | using ..ModalDecisionTrees
using SoleData
using SoleData.DimensionalDatasets
using ..ModalDecisionTrees: AbstractFeature, TestOperator
using ..ModalDecisionTrees: ModalityId
using ..ModalDecisionTrees: DTLeaf, DTNode, DTInternal
import SoleData: feature, test_operator, threshold
export DecisionPath, DecisionPathNode,
get_path_in_tree, get_internalnode_dirname,
mk_tree_path, get_tree_path_as_dirpath
# A single step along a decision path: whether the decision was satisfied
# (`taken`), the decision's feature / test operator / threshold, and the
# worlds that "survived" the step.
# NOTE(review): `threshold :: T where T` and `worlds :: AbstractWorlds` are
# abstractly-typed fields; parametrizing the struct would make it concrete.
struct DecisionPathNode
    taken         :: Bool
    feature       :: AbstractFeature
    test_operator :: TestOperator
    threshold     :: T where T
    worlds        :: AbstractWorlds
end

# Field accessors.
taken(n::DecisionPathNode)         = n.taken
feature(n::DecisionPathNode)       = n.feature
test_operator(n::DecisionPathNode) = n.test_operator
threshold(n::DecisionPathNode)     = n.threshold
worlds(n::DecisionPathNode)        = n.worlds

# A decision path is the sequence of steps from the root down to a leaf.
const DecisionPath = Vector{DecisionPathNode}
# Base case: at a leaf the path ends; the surviving worlds are those currently
# held for the given modality.
function _get_path_in_tree(
    leaf::DTLeaf,
    X::Any,
    i_instance::Integer,
    worlds::AbstractVector{<:AbstractWorlds},
    i_modality::ModalityId,
    paths::Vector{DecisionPath},
)::AbstractWorlds
    worlds[i_modality]
end
# Recursive case: perform a modal step at `tree`'s decision, descend into the
# chosen child, and prepend to `paths[i_instance]` a `DecisionPathNode`
# recording the step together with the worlds that led to the final outcome.
#
# Fixes w.r.t. the original:
# - the fifth parameter was named `i_modality`, shadowing the `i_modality(tree)`
#   accessor called throughout the body (which made those calls error out);
#   the parameter itself is unused in this method, so it is renamed;
# - `thresholda(...)` was a typo for `threshold(...)`.
function _get_path_in_tree(tree::DTInternal, X::MultiLogiset, i_instance::Integer, worlds::AbstractVector{<:AbstractWorlds}, _parent_i_modality::Integer, paths::Vector{DecisionPath})::AbstractWorlds
    satisfied = true
    (satisfied,new_worlds,worlds_map) =
        modalstep(
            modality(X, i_modality(tree)),
            i_instance,
            worlds[i_modality(tree)],
            decision(tree),
            Val(true)
        )
    worlds[i_modality(tree)] = new_worlds
    survivors = _get_path_in_tree((satisfied ? left(tree) : right(tree)), X, i_instance, worlds, i_modality(tree), paths)
    # A world of this step survives iff one of the worlds it leads to (per
    # `worlds_map`) survives the rest of the path.
    new_survivors::AbstractWorlds = Vector{AbstractWorld}()
    for curr_w in keys(worlds_map)
        if length(intersect(worlds_map[curr_w], survivors)) > 0
            push!(new_survivors, curr_w)
        end
    end
    pushfirst!(paths[i_instance], DecisionPathNode(satisfied, feature(decision(tree)), test_operator(decision(tree)), threshold(decision(tree)), deepcopy(new_survivors)))
    return new_survivors
end
# Compute, for every instance of `X`, the decision path it follows in `tree`.
function get_path_in_tree(tree::DTree{S}, X)::Vector{DecisionPath} where {S}
    n = ninstances(X)
    paths = DecisionPath[DecisionPath() for _ in 1:n]
    for i in 1:n
        initial_worlds = ModalDecisionTrees.mm_instance_initialworldset(X, tree, i)
        _get_path_in_tree(root(tree), X, i, initial_worlds, 1, paths)
    end
    return paths
end
# Directory-name-safe rendering of an internal node's decision
# (spaces become underscores).
get_internalnode_dirname(node::DTInternal)::String =
    replace(displaydecision(node), " " => "_")
# Materialize a tree on the filesystem: each internal node becomes a pair of
# "Y_<decision>" / "N_<decision>" directories, and each leaf an empty file
# named after its prediction.
mk_tree_path(leaf::DTLeaf; path::String) = touch(path * "/" * string(prediction(leaf)) * ".txt")
function mk_tree_path(node::DTInternal; path::String)
    dir_name = get_internalnode_dirname(node)
    mkpath(path * "/Y_" * dir_name)
    mkpath(path * "/N_" * dir_name)
    mk_tree_path(left(node); path = path * "/Y_" * dir_name)
    mk_tree_path(right(node); path = path * "/N_" * dir_name)
end
# Entry point: create the whole hierarchy under `path`/`tree_hash`.
function mk_tree_path(tree_hash::String, tree::DTree; path::String)
    mkpath(path * "/" * tree_hash)
    mk_tree_path(root(tree); path = path * "/" * tree_hash)
end
# Translate a `DecisionPath` into the corresponding directory path within the
# hierarchy produced by `mk_tree_path`.
function get_tree_path_as_dirpath(tree_hash::String, tree::DTree, decpath::DecisionPath; path::String)::String
    cursor = root(tree)
    segments = [path, tree_hash]
    for step in decpath
        cursor isa DTLeaf && break
        prefix = step.taken ? "Y" : "N"
        push!(segments, prefix * "_" * get_internalnode_dirname(cursor))
        cursor = step.taken ? left(cursor) : right(cursor)
    end
    return join(segments, "/")
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1156 | ################################################################################
# Experimental features
################################################################################
"""
Experimental features: textual tree parsing (`parse.jl`) and decision-path
extraction (`decisionpath.jl`).
"""
module experimentals

using ModalDecisionTrees
using ModalDecisionTrees:
    relation, feature, test_operator, threshold,
    is_propositional_decision,
    is_global_decision

using SoleData
using SoleData.DimensionalDatasets
using SoleLogics

using SoleData: nfeatures, nrelations,
    nmodalities, eachmodality, modality,
    displaystructure,
    #
    relations,
    #
    MultiLogiset,
    SupportedLogiset

using SoleData: scalarlogiset

using SoleData: AbstractWorld, AbstractRelation
using SoleData: AbstractWorlds, Worlds

using SoleLogics: FullDimensionalFrame

using SoleData.DimensionalDatasets
using SoleData: MultiLogiset
using SoleData: Worlds

using SoleData: worldtype

using SoleData: OneWorld

using SoleData: Interval, Interval2D

using SoleData: IARelations

# `const` so that the module aliases are type-stable bindings (they were
# plain non-const globals).
const MDT = ModalDecisionTrees
const SL = SoleLogics

include("parse.jl")
include("decisionpath.jl")

end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
"""
Parse the textual rendering of a modal decision tree (as produced by
`displaymodel`) back into a `DTree`.

`worldtypes` and `initconditions` (one per modality) are required; the
remaining keyword arguments control the expected syntax and error reporting.
"""
function parse_tree(
    tree_str::String;
    check_format = true,
    _depth = 0,
    offset = 0,
    openpar = SoleData.UVF_OPENING_PARENTHESIS,
    closepar = SoleData.UVF_CLOSING_PARENTHESIS,
    varprefix = SoleData.UVF_VARPREFIX,
    worldtypes = Type{SL.AbstractWorld}[],
    initconditions = MDT.InitialCondition[],
)
    @assert openpar in ["[", "("] "Unexpected opening bracket: $(openpar)"
    @assert closepar in ["]", ")"] "Unexpected closing bracket: $(closepar)"
    @assert !isempty(worldtypes) "Please, provide argument `worldtypes`."
    @assert !isempty(initconditions) "Please, provide argument `initconditions`."
    # Normalize the containers to concretely-typed vectors.
    wts = Type{<:SL.AbstractWorld}[worldtypes...]
    ics = MDT.InitialCondition[initconditions...]
    parsed_root = _parse_tree(tree_str; check_format = check_format, _depth = _depth, offset = offset, varprefix = varprefix)
    return DTree(parsed_root, wts, ics)
end
"""
Recursive worker for `parse_tree`: parse one (sub)tree from its textual
rendering. `offset` is the line offset of `tree_str` within the original
string (used for error messages only); `check_format` enables a preliminary
line-by-line syntax check; `_depth` tracks the recursion depth.

Changes w.r.t. the original: two stray debug `print(repeat(" ", _depth))`
calls (which polluted stdout at every recursion step) and several blocks of
commented-out debug code were removed. No parsing logic was altered.
"""
function _parse_tree(
    tree_str::String;
    check_format = true,
    _depth = 0,
    offset = 0,
    varprefix = SoleData.UVF_VARPREFIX,
    openpar = SoleData.UVF_OPENING_PARENTHESIS,
    closepar = SoleData.UVF_CLOSING_PARENTHESIS,
)
    ########################################################################################
    # Regular expressions describing the expected syntax
    ########################################################################################

    # Keyword arguments forwarded to recursive calls.
    # NOTE(review): `openpar`/`closepar` are *not* forwarded, so non-default
    # parentheses only take effect at the top level — confirm this is intended.
    child_kwargs = (;
        varprefix = varprefix,
    )

    V = varprefix

    _threshold_ex = "[-+]?(?:[0-9]+(?:\\.[0-9]*)?|\\.[0-9]+)(?:e?[-+]?)(?:[0-9]+)?" # https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch06s10.html
    _indentation_ex = "[ │]*[✔✘]"
    _metrics_ex = "\\(\\S*.*\\)"
    _feature_ex = "(?:[^\\s\\(\\)]+)\\s+(?:(?:⫹|⫺|⪳|⪴|⪵|⪶|↗|↘|>|<|=|≤|≥|<=|>=))"
    _normal_feature_ex_capturing = "^(\\S*)\\$openpar$V(\\d+)\\$closepar\\s+((?:>|<|=|≤|≥|<=|>=))\$"
    _propositional_feature_ex_capturing = "^$V(\\d+)\\s+((?:>|<|=|≤|≥|<=|>=))\$"
    _special_feature_ex_capturing = "^$V(\\d+)\\s+((?:⫹|⫺|⪳|⪴|⪵|⪶|↗|↘))\$"
    _decision_pr = "$(_feature_ex)\\s+(?:$(_threshold_ex))"
    _decision_pr__capturing = "($(_feature_ex))\\s+($(_threshold_ex))"

    leaf_ex = "(?:\\S+)\\s+:\\s+\\d+/\\d+(?:\\s+(?:$(_metrics_ex)))?"
    leaf_ex__capturing = "(\\S+)\\s+:\\s+(\\d+)/(\\d+)(?:\\s+($(_metrics_ex)))?"
    decision_ex = "(?:SimpleDecision\\()?(?:⟨(?:\\S+)⟩\\s*)?(?:$(_decision_pr)|\\(\\s*$(_decision_pr)\\s*\\))(?:\\))?"
    decision_ex__capturing = "(?:SimpleDecision\\()?(?:⟨(\\S+)⟩\\s*)?\\(?\\s*$(_decision_pr__capturing)\\s*\\)?(?:\\))?"

    # TODO default frame to 1
    split_ex = "\\s*{(\\d+)}\\s+($(decision_ex))(?:\\s*($(leaf_ex)))?"

    blank_line_regex = Regex("^\\s*\$")
    split_line_regex = Regex("^($(_indentation_ex)\\s+)?$(split_ex)\\s*\$")
    leaf_line_regex = Regex("^($(_indentation_ex)\\s+)?$(leaf_ex)\\s*\$")

    # Parse a number, demoting integral floats to Int.
    function _parse_simple_real(x)
        x = parse(Float64, x)
        x = isinteger(x) ? Int(x) : x
    end

    # Parse a decision string of the form "⟨R⟩ feature ⋈ threshold" into a
    # `RestrictedDecision`.
    function _parse_decision((i_this_line, decision_str)::Tuple{<:Integer,<:AbstractString},)
        # Map a relation's syntaxstring (or a legacy alias) to the relation object.
        function _parse_relation(relation_str)
            # parsable_rels = concretesubtypes(AbstractRelation) TODO
            parsable_rels = [
                SL.globalrel,
                SL.identityrel,
                SoleLogics.IARelations...,
                SoleLogics.IA3Relations...,
                SoleLogics.IA7Relations...,
                SoleLogics.RCC5Relations...,
                SoleLogics.RCC8Relations...,
            ] |> unique
            rel_d = Dict([
                [
                    "Ai" => SL.IA_Ai,
                    "Li" => SL.IA_Li,
                    "Bi" => SL.IA_Bi,
                    "Ei" => SL.IA_Ei,
                    "Di" => SL.IA_Di,
                    "Oi" => SL.IA_Oi,
                ]...,
                [syntaxstring(r) => r for r in parsable_rels]...
            ])
            if isnothing(relation_str)
                identityrel
            else
                rel_d[relation_str]
            end
        end
        # Parse "f(V1) ⋈", "V1 ⋈" or "V1 <special-op>" into (feature, test operator).
        function _parse_feature_test_operator(feature_str)
            m_normal = match(Regex(_normal_feature_ex_capturing), feature_str)
            m_special = match(Regex(_special_feature_ex_capturing), feature_str)
            m_propos = match(Regex(_propositional_feature_ex_capturing), feature_str)
            if !isnothing(m_normal) && length(m_normal) == 3
                feature_fun, i_variable, test_operator = m_normal
                # Resolve the feature-function name into a feature constructor.
                function eval_feature_fun_constructor(fun_str)
                    if fun_str == "min" MDT.VariableMin
                    elseif fun_str == "max" MDT.VariableMax
                    else
                        try
                            fun = eval(Symbol(fun_str))
                            (i_variable)->MDT.UnivariateFeature(i_variable, fun)
                        catch
                            (i_variable)->MDT.UnivariateNamedFeature(i_variable, fun_str)
                        end
                    end
                end
                feature_type = eval_feature_fun_constructor(feature_fun)
                i_variable = parse(Int, i_variable)
                test_operator = eval(Symbol(test_operator))
                feature_type(i_variable), test_operator
            elseif !isnothing(m_special) && length(m_special) == 2
                i_variable, feature_fun_test_operator = m_special
                feature_fun_test_operator_d = Dict([
                    "⪴" => (i_variable)->(SoleData.VariableMin(i_variable), ≥),
                    "⪴₈₀" => (i_variable)->(SoleData.VariableSoftMin(i_variable, 80), ≥),
                    "⪳₈₀" => (i_variable)->(SoleData.VariableSoftMax(i_variable, 80), ≤),
                    "⪳" => (i_variable)->(SoleData.VariableMax(i_variable), ≤),
                    "↘" => (i_variable)->(SoleData.VariableMin(i_variable), ≤),
                    "↗" => (i_variable)->(SoleData.VariableMax(i_variable), ≥),
                ])
                feature_fun_test_operator = feature_fun_test_operator_d[feature_fun_test_operator]
                i_variable = parse(Int, i_variable)
                feature_fun_test_operator(i_variable)
            elseif !isnothing(m_propos) && length(m_propos) == 2
                i_variable, test_operator = m_propos
                i_variable = parse(Int, i_variable)
                feature = MDT.UnivariateNamedFeature(i_variable, "")
                test_operator = eval(Symbol(test_operator))
                feature, test_operator
            else
                error("Unexpected format encountered on line $(i_this_line+offset) when parsing feature: \"$(feature_str)\". Matches $(m_normal), $(m_special), $(m_propos)")
            end
        end

        m = match(Regex(decision_ex), decision_str)
        @assert !isnothing(m) "Unexpected format encountered on line $(i_this_line+offset) when parsing decision: \"$(decision_str)\". Matches: $(m)"
        m = match(Regex(decision_ex__capturing), decision_str)
        @assert !isnothing(m) && length(m) == 3 "Unexpected format encountered on line $(i_this_line+offset) when parsing decision: \"$(decision_str)\". Matches: $(m) Expected matches = 3"
        relation, feature_test_operator, threshold = m
        relation = _parse_relation(relation)
        feature, test_operator = _parse_feature_test_operator(feature_test_operator)
        threshold = _parse_simple_real(threshold)
        RestrictedDecision(ScalarExistentialFormula(relation, feature, test_operator, threshold))
    end

    # Parse a leaf string "class : n_good/n_tot (metrics)" into a `DTLeaf`
    # whose supporting labels reproduce the n_good/n_tot counts.
    function _parse_leaf((i_this_line, leaf_str)::Tuple{<:Integer,<:AbstractString},)
        m = match(Regex(leaf_ex__capturing), leaf_str)
        @assert !isnothing(m) && length(m) == 4 "Unexpected format encountered on line $(i_this_line+offset) when parsing leaf: \"$(leaf_str)\". Matches: $(m) Expected matches = 4"
        class, n_good, n_tot, metrics = m
        class = String(class)
        n_good = parse(Int, n_good)
        n_tot = parse(Int, n_tot)
        # Note: metrics are not used
        DTLeaf(class, String[fill(class, n_good)..., fill("NO_$(class)", n_tot-n_good)...])
    end

    ########################################################################################
    ########################################################################################
    ########################################################################################

    # Can't strip `tree_str` here: line numbers (and thus error messages)
    # would become misaligned with `offset`.
    lines = enumerate(split(tree_str, "\n")) |> collect

    if check_format
        # Every non-blank line must be either a split line or a leaf line.
        for (i_line, line) in lines
            !isempty(strip(line)) || continue
            _line = line
            blank_match = match(blank_line_regex, _line)
            split_match = match(split_line_regex, _line)
            leaf_match = match(leaf_line_regex, _line)
            is_blank = !isnothing(blank_match)
            is_split = !isnothing(split_match)
            is_leaf = !isnothing(leaf_match)
            @assert xor(is_blank, is_split, is_leaf) "Could not parse line $(i_line+offset): \"$(line)\". $((is_blank, is_split, is_leaf))"
        end
    end

    _lines = filter(((i_line, line),)->(!isempty(strip(line))), lines)

    if length(_lines) == 1 # a leaf
        _parse_leaf(_lines[1])
    else # a split
        # Locate the split line (starting with "{modality}") and the extent of
        # the "yes" (✔) and "no" (✘) subtree blocks.
        this_line, yes_line, no_line = begin
            this_line = nothing
            yes_line = -Inf
            no_line = Inf
            for (i_line, line) in lines
                !isempty(strip(line)) || continue
                _line = line
                if !isnothing(match(r"^\s*{.*$", _line))
                    @assert isnothing(this_line) "Cannot have more than one row beginning with '{'"
                    this_line = i_line
                    yes_line = i_line + 1
                elseif i_line == yes_line
                    @assert startswith(_line, "✔") "Line $(i_line+offset) \"$(_line)\" should start with '✔'"
                elseif no_line > i_line > yes_line
                    if !startswith(_line, "│")
                        @assert startswith(_line, "✘") "Line $(i_line+offset) \"$(_line)\" should start with '✘'"
                        no_line = i_line-1
                    end
                else
                    @assert startswith(_line, " ") "Line $(i_line+offset) \"$(_line)\" should start with ' '"
                end
            end
            this_line, yes_line, no_line
        end

        # Strip one level of tree indentation ("│"/" " or "✔ "/"✘ ") from a
        # block of lines, rejoining them into a sub-tree string.
        function clean_lines(lines)
            join([(isempty(strip(line)) ? line : begin
                begin_ex = Regex("^([ │]|[✔✘]\\s+)(.*)\$")
                match(begin_ex, line)[2]
            end) for (i_line, line) in lines], "\n")
        end

        left_tree_str, right_tree_str = clean_lines(lines[yes_line:no_line]), clean_lines(lines[no_line+1:end])

        i_this_line, this_line = lines[this_line]
        m = match(Regex(split_ex), this_line)
        @assert !isnothing(m) && length(m) == 3 "Unexpected format encountered on line $(i_this_line+offset) : \"$(this_line)\". Matches: $(m) Expected matches = 3"
        i_modality, decision_str, leaf_str = m
        i_modality = parse(Int, i_modality)
        decision = _parse_decision((i_this_line, decision_str),)
        left = _parse_tree(left_tree_str; offset = yes_line-1, check_format = false, _depth = _depth + 1, child_kwargs...)
        right = _parse_tree(right_tree_str; offset = no_line-1, check_format = false, _depth = _depth + 1, child_kwargs...)
        if isnothing(leaf_str)
            DTInternal(i_modality, decision, left, right)
        else
            this = _parse_leaf((i_this_line, leaf_str),)
            DTInternal(i_modality, decision, this, left, right)
        end
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6100 | """
Adapted from https://github.com/JuliaAI/DecisionTree.jl/blob/dev/src/abstract_trees.jl
"""
import AbstractTrees: children, printnode
# Shorthand alias for the package module.
# NOTE(review): consider `const MDT = ModalDecisionTrees` for a type-stable
# binding (non-const module-level globals defeat specialization).
MDT = ModalDecisionTrees
Implementation of the `AbstractTrees.jl`-interface
(see: [AbstractTrees.jl](https://github.com/JuliaCollections/AbstractTrees.jl)).
The functions `children` and `printnode` make up the interface traits of `AbstractTrees.jl`
(see below for details).
The goal of this implementation is to wrap a `ModalDecisionTree` in this abstract layer,
so that a plot recipe for visualization of the tree can be created that doesn't rely
on any implementation details of `ModalDecisionTrees.jl`. That opens the possibility to create
a plot recipe which can be used by a variety of tree-like models.
For a more detailed explanation of this concept have a look at the follwing article
in "Towards Data Science":
["If things are not ready to use"](https://towardsdatascience.com/part-iii-if-things-are-not-ready-to-use-59d2db378bec)
"""
"""
InfoNode{T,S}
InfoLeaf{T}
These types are introduced so that additional information currently not present in
a `ModalDecisionTree`-structure -- for example, the names of the variables --
can be used for visualization. This additional information is stored in the variable `info` of
these types. It is a `NamedTuple`. So it can be used to store arbitraty information,
apart from the two points mentioned.
In analogy to the type definitions of `ModalDecisionTree`, the generic type `S` is
the type of the variable values used within a node as a threshold for the splits
between its children and `T` is the type of the output given (basically, a Number or a String).
"""
struct InfoNode{T,S}
    node :: MDT.DTInternal{T,S} # the wrapped internal node
    info :: NamedTuple          # arbitrary extra information (e.g., variable names)
end

struct InfoLeaf{T}
    leaf :: MDT.AbstractDecisionLeaf{T} # the wrapped leaf
    info :: NamedTuple                  # arbitrary extra information
end
"""
wrap(node::MDT.DTInternal, info = NamedTuple())
wrap(leaf::MDT.AbstractDecisionLeaf, info = NamedTuple())
Add to each `node` (or `leaf`) the additional information `info`
and wrap both in an `InfoNode`/`InfoLeaf`.
Typically a `node` or a `leaf` is obtained by creating a decision tree using either
the native interface of `ModalDecisionTrees.jl` or via other interfaces which are available
for this package (e.g., `MLJ`, see their docs for further details).
Using the function `build_tree` of the native interface returns such an object.
To use a ModalDecisionTree `mdt` (obtained this way) with the abstraction layer
provided by the `AbstractTrees`-interface implemented here
and optionally add variable names (`modality_variable_names`, an arrays of arrays of strings)
use the following syntax:
1. `wdc = wrap(mdt)`
2. `wdc = wrap(mdt, (modality_variable_names = modality_variable_names, ))`
In the first case `mdt` gets just wrapped, no information is added. No. 2 adds variable names.
Note that the trailing comma is needed, in order to create a NamedTuple.
"""
# Entry point for whole trees: unwrap to the root node.
# Fix: `info` must be forwarded positionally; the other `wrap` methods declare
# it as a positional argument, so the previous keyword call (`info = info`)
# raised a MethodError at runtime.
wrap(node::MDT.DTree, info::NamedTuple = NamedTuple()) = wrap(root(node), info)
wrap(node::MDT.DTInternal, info::NamedTuple = NamedTuple()) = InfoNode(node, info)
wrap(leaf::MDT.AbstractDecisionLeaf, info::NamedTuple = NamedTuple()) = InfoLeaf(leaf, info)
"""
children(node::InfoNode)
Return the children of the given `node`.
In case of a `ModalDecisionTree` there are always exactly two children, because
the model produces binary trees where all nodes have exactly one left and
one right child. `children` is used for tree traversal.
The additional information `info` is carried over from `node` to its children.
"""
# Children accessors for tree traversal (AbstractTrees interface).
# A `DTree` delegates to its root; an internal node has exactly two children
# (left = "yes" branch, right = "no" branch); a leaf has none.
children(dt::MDT.DTree) = children(root(dt))
children(dt_node::MDT.DTInternal) = (
    left(dt_node),
    right(dt_node),
)
children(dt_leaf::MDT.AbstractDecisionLeaf) = ()
# For wrapped nodes, the `info` NamedTuple is propagated down to the children.
children(node::InfoNode) = (
    wrap(left(node.node), node.info),
    wrap(right(node.node), node.info),
)
children(leaf::InfoLeaf) = ()
"""
TODO use AbstractTrees.nodevalue when a version > 0.4 is available
"""
_nodevalue(dt_node::MDT.DTInternal) = (i_modality(dt_node), decision(dt_node))
_nodevalue(dt_leaf::MDT.AbstractDecisionLeaf) = (prediction(dt_leaf), )
_nodevalue(node::InfoNode) = _nodevalue(node.node)
# Fix: `InfoLeaf` stores the wrapped leaf in its `leaf` field (not `node`);
# `leaf.node` raised "type InfoLeaf has no field node" at runtime.
_nodevalue(leaf::InfoLeaf) = _nodevalue(leaf.leaf)
"""
printnode(io::IO, node::InfoNode)
printnode(io::IO, leaf::InfoLeaf)
Write a printable representation of `node` or `leaf` to output-stream `io`.
If `node.info`/`leaf.info` have a field called
- `modality_variable_names` it is expected to be an array of arrays of variable names corresponding
to the variable names used in the tree nodes; note that there are two layers of reference
because variables are grouped into `modalities` (see MLJ's docs for ModalDecisionTree: @doc ModalDecisionTree)
They will be used for printing instead of the ids.
Note that the left subtree of any split node represents the 'yes-branch', while the right subtree
the 'no-branch', respectively. `print_tree` outputs the left subtree first
and then below the right subtree.
"""
# Print the split decision of a (native) internal tree node.
function printnode(io::IO, dt_node::MDT.DTInternal)
    print(io, displaydecision(dt_node))
end
# Print a (native) leaf as its predicted value followed by "(n_correct/n_instances)".
function printnode(io::IO, dt_leaf::MDT.AbstractDecisionLeaf)
    metrics = MDT.get_metrics(dt_leaf)
    print(io, MDT.displaybriefprediction(dt_leaf), " ($(metrics.n_correct)/$(metrics.n_inst))")
end
# Select the fields of NamedTuple `nt` whose name satisfies predicate `pred`.
# (see https://discourse.julialang.org/t/filtering-keys-out-of-named-tuples/73564)
function filter_nt_fields(pred, nt)
    kept = Tuple(k for k in keys(nt) if pred(k))
    return NamedTuple{kept}(nt)
end
# Print a wrapped internal node, forwarding only the display options from
# `node.info` that `displaydecision` understands.
function printnode(io::IO, node::InfoNode)
    kwargs = filter_nt_fields(x -> x in [:variable_names_map, :threshold_display_method, :use_feature_abbreviations], node.info)
    dt_node = node.node
    print(io, displaydecision(dt_node; kwargs...))
end
# Print a wrapped leaf as its predicted value plus "(n_correct/n_instances)".
function printnode(io::IO, leaf::InfoLeaf)
    dt_leaf = leaf.leaf
    metrics = MDT.get_metrics(dt_leaf)
    # if :class_labels ∈ keys(leaf.info)
    #     print(io, leaf.info.class_labels[MDT.displaybriefprediction(dt_leaf)], " ($(metrics.n_correct)/$(metrics.n_inst))")
    # else
    print(io, MDT.displaybriefprediction(dt_leaf), " ($(metrics.n_correct)/$(metrics.n_inst))")
    # end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 9208 | # Inspired from JuliaAI/MLJDecisionTreeInterface.jl
module MLJInterface
export ModalDecisionTree, ModalRandomForest
export depth, wrapdataset
using MLJModelInterface
using MLJModelInterface.ScientificTypesBase
using CategoricalArrays
using DataFrames
using DataStructures
using Tables
using Random
using Random: GLOBAL_RNG
using SoleLogics
using SoleLogics: AbstractRelation
using SoleData
using SoleData.MLJUtils
using SoleData: TestOperator
using SoleModels
using ModalDecisionTrees
using ModalDecisionTrees: InitialCondition
const MMI = MLJModelInterface
const MDT = ModalDecisionTrees
const _package_url = "https://github.com/giopaglia/$(MDT).jl"
include("MLJ/default-parameters.jl")
include("MLJ/sanity-checks.jl")
include("MLJ/printer.jl")
include("MLJ/wrapdataset.jl")
include("MLJ/feature-importance.jl")
include("MLJ/ModalDecisionTree.jl")
include("MLJ/ModalRandomForest.jl")
include("MLJ/docstrings.jl")
# Union of all symbolic model types exposed by this MLJ interface.
const SymbolicModel = Union{
    ModalDecisionTree,
    ModalRandomForest,
}
# Single-tree models.
const TreeModel = Union{
    ModalDecisionTree,
}
# Ensemble (forest) models.
const ForestModel = Union{
    ModalRandomForest,
}
include("MLJ/downsize.jl")
include("MLJ/clean.jl")
############################################################################################
############################################################################################
############################################################################################
# DecisionTree.jl (https://github.com/JuliaAI/DecisionTree.jl) is the main package
# for decision tree learning in Julia. These definitions allow for ModalDecisionTrees.jl
# to act as a drop-in replacement for DecisionTree.jl. Well, more or less.
# LEGACY compatibility with JuliaAI/DecisionTree.jl: `depth` aliases `height`.
depth(t::MDT.DTree) = height(t)
############################################################################################
############################################################################################
############################################################################################
"""
    MMI.fit(m::SymbolicModel, verbosity::Integer, X, y, var_grouping, classes_seen=nothing, w=nothing)

MLJ fit entry point: learn a modal decision tree or forest from logiset `X` and
targets `y` (with optional weights `w`), optionally post-prune it, and return
the `(fitresult, cache, report)` triple expected by MLJ. The report also
exposes `printmodel` and `sprinkle` closures, plus raw and SoleModels-translated
versions of the learned model.
"""
function MMI.fit(m::SymbolicModel, verbosity::Integer, X, y, var_grouping, classes_seen=nothing, w=nothing)
    # @show get_kwargs(m, X)
    # Dispatch to the appropriate native learning routine.
    model = begin
        if m isa ModalDecisionTree
            MDT.build_tree(X, y, w; get_kwargs(m, X)...)
        elseif m isa ModalRandomForest
            MDT.build_forest(X, y, w; get_kwargs(m, X)...)
        else
            error("Unexpected model type: $(typeof(m))")
        end
    end
    # Optional post-pruning. For classification (classes_seen available) a
    # missing threshold defaults to Inf; for regression it must be provided.
    if m.post_prune
        merge_purity_threshold = m.merge_purity_threshold
        if isnothing(merge_purity_threshold)
            if !isnothing(classes_seen)
                merge_purity_threshold = Inf
            else
                error("Please, provide a `merge_purity_threshold` parameter (maximum MAE at splits).")
            end
        end
        model = MDT.prune(model; simplify = true, max_performance_at_split = merge_purity_threshold)
    end
    verbosity < 2 || MDT.printmodel(model; max_depth = m.display_depth, variable_names_map = var_grouping)
    # Translation into a SoleModels representation (note: the lambda's `m`
    # shadows the model hyperparameter struct on purpose — it receives a tree).
    translate_function = m->ModalDecisionTrees.translate(m, (;
        # syntaxstring_kwargs = (; hidemodality = (length(var_grouping) == 1), variable_names_map = var_grouping)
    ))
    rawmodel_full = model
    rawmodel = MDT.prune(model; simplify = true)
    solemodel_full = translate_function(model)
    solemodel = translate_function(rawmodel)
    fitresult = (
        model = model,
        rawmodel = rawmodel,
        solemodel = solemodel,
        var_grouping = var_grouping,
    )
    printer = ModelPrinter(m, model, solemodel, var_grouping)
    cache = nothing
    report = (
        printmodel = printer,
        # Re-route new (Xnew, ynew) data through the learned model, returning
        # predictions and the "sprinkled" (optionally simplified) model.
        sprinkle = (Xnew, ynew; simplify = false)->begin
            (Xnew, ynew, var_grouping, classes_seen, w) = MMI.reformat(m, Xnew, ynew; passive_mode = true)
            preds, sprinkledmodel = ModalDecisionTrees.sprinkle(model, Xnew, ynew)
            if simplify
                sprinkledmodel = MDT.prune(sprinkledmodel; simplify = true)
            end
            preds, translate_function(sprinkledmodel)
        end,
        # TODO remove redundancy?
        model = solemodel,
        model_full = solemodel_full,
        rawmodel = rawmodel,
        rawmodel_full = rawmodel_full,
        solemodel = solemodel,
        solemodel_full = solemodel_full,
        var_grouping = var_grouping,
        # LEGACY with JuliaIA/DecisionTree.jl
        print_tree = printer,
        # features = ?,
    )
    # For classification, remember the classes seen at training time.
    if !isnothing(classes_seen)
        report = merge(report, (;
            classes_seen = classes_seen,
        ))
        fitresult = merge(fitresult, (;
            classes_seen = classes_seen,
        ))
    end
    return fitresult, cache, report
end
# LEGACY aliases mirroring DecisionTree.jl: expose the raw model as `tree`/`forest`.
MMI.fitted_params(::TreeModel, fitresult) = merge(fitresult, (; tree = fitresult.rawmodel))
MMI.fitted_params(::ForestModel, fitresult) = merge(fitresult, (; forest = fitresult.rawmodel))
############################################################################################
############################################################################################
############################################################################################
"""
    MMI.predict(m::SymbolicModel, fitresult, Xnew, var_grouping = nothing)

Predict (probabilistic) outputs for the instances in `Xnew` using the pruned
raw model stored in `fitresult`. If a `var_grouping` is provided and differs
from the one seen at training time, a warning is issued (predictions are still
computed).
"""
function MMI.predict(m::SymbolicModel, fitresult, Xnew, var_grouping = nothing)
    if !isnothing(var_grouping) && var_grouping != fitresult.var_grouping
        # Fix: the original message concatenated the two groupings with no
        # separator, producing an unreadable warning.
        @warn "variable grouping differs from the one used in training!\n" *
            "training var_grouping: $(fitresult.var_grouping)\n" *
            "var_grouping = $(var_grouping)" *
            "\n"
    end
    MDT.apply_proba(fitresult.rawmodel, Xnew, get(fitresult, :classes_seen, nothing); suppress_parity_warning = true)
end
############################################################################################
# DATA FRONT END
############################################################################################
# MLJ data front end (fit side): wrap the user-provided dataset into a logiset
# (plus its variable grouping) and normalize the target vector.
function MMI.reformat(m::SymbolicModel, X, y, w = nothing; passive_mode = false)
    X, var_grouping = wrapdataset(X, m; passive_mode = passive_mode)
    y, classes_seen = fix_y(y)
    (X, y, var_grouping, classes_seen, w)
end
# Row subsampling for resampling strategies; `var_grouping` and `classes_seen`
# are dataset-level metadata and pass through unchanged.
MMI.selectrows(::SymbolicModel, I, X, y, var_grouping, classes_seen, w = nothing) =
    (MMI.selectrows(X, I), MMI.selectrows(y, I), var_grouping, classes_seen, MMI.selectrows(w, I),)
# For predict
# MLJ data front end (predict side): wrap new data without recomputing
# expensive supports (passive_mode = true).
function MMI.reformat(m::SymbolicModel, Xnew)
    Xnew, var_grouping = wrapdataset(Xnew, m; passive_mode = true)
    (Xnew, var_grouping)
end
# Row subsampling for prediction data.
MMI.selectrows(::SymbolicModel, I, Xnew, var_grouping) =
    (MMI.selectrows(Xnew, I), var_grouping,)
# MMI.fitted_params(::SymbolicModel, fitresult) = fitresult
############################################################################################
# FEATURE IMPORTANCES
############################################################################################
# Declare to MLJ that these models can report feature importances.
MMI.reports_feature_importances(::Type{<:SymbolicModel}) = true
# Compute normalized split-based feature importances from the fitted raw model,
# returned as pairs sorted by decreasing importance. Only the :split mode is
# supported.
function MMI.feature_importances(m::SymbolicModel, fitresult, report)
    m.feature_importance == :split ||
        error("Unexpected feature_importance encountered: $(m.feature_importance).")
    importance_by_feature = compute_featureimportance(fitresult.rawmodel, fitresult.var_grouping; normalize=true)
    return sort(collect(importance_by_feature); by = last, rev = true)
end
############################################################################################
# METADATA (MODEL TRAITS)
############################################################################################
# Register package-level metadata for all exposed model types.
MMI.metadata_pkg.(
    (
        ModalDecisionTree,
        ModalRandomForest,
        # DecisionTreeRegressor,
        # RandomForestRegressor,
        # AdaBoostStumpClassifier,
    ),
    name = "$(MDT)",
    package_uuid = "e54bda2e-c571-11ec-9d64-0242ac120002",
    package_url = _package_url,
    is_pure_julia = true,
    is_wrapper=false,
    package_license = "MIT",
)
# Register per-model metadata: accepted input/target scitypes, human-readable
# name, weight support, and load path.
for (model, human_name) in [
    (ModalDecisionTree, "Modal Decision Tree"),
    (ModalRandomForest, "Modal Random Forest"),
]
    MMI.metadata_model(
        model,
        # Tabular data whose columns hold scalars, vectors or matrices of
        # continuous/count/ordered values (i.e., (multi)dimensional datasets).
        input_scitype = Union{
            Table(
                Continuous, AbstractArray{<:Continuous,0}, AbstractArray{<:Continuous,1}, AbstractArray{<:Continuous,2},
                Count, AbstractArray{<:Count,0}, AbstractArray{<:Count,1}, AbstractArray{<:Count,2},
                OrderedFactor, AbstractArray{<:OrderedFactor,0}, AbstractArray{<:OrderedFactor,1}, AbstractArray{<:OrderedFactor,2},
            ),
        },
        target_scitype = Union{
            AbstractVector{<:Multiclass},
            AbstractVector{<:Continuous},
            AbstractVector{<:Count},
            AbstractVector{<:Finite},
            AbstractVector{<:Textual}
        },
        human_name = human_name,
        supports_weights = true,
        load_path = "$MDT.$(model)",
    )
end
end
using .MLJInterface
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5299 | mutable struct ModalDecisionTree <: MMI.Probabilistic
## Pruning conditions
max_depth :: Union{Nothing,Int}
min_samples_leaf :: Union{Nothing,Int}
min_purity_increase :: Union{Nothing,Float64}
max_purity_at_leaf :: Union{Nothing,Float64}
max_modal_depth :: Union{Nothing,Int}
## Logic parameters
# Relation set
relations :: Union{
Nothing, # defaults to a well-known relation set, depending on the data;
Symbol, # one of the relation sets specified in AVAILABLE_RELATIONS;
Vector{<:AbstractRelation}, # explicitly specify the relation set;
# Vector{<:Union{Symbol,Vector{<:AbstractRelation}}}, # MULTIMODAL CASE: specify a relation set for each modality;
Function # A function worldtype -> relation set.
}
# Condition set
features :: Union{
Nothing, # defaults to scalar conditions (with ≥ and <) on well-known feature functions (e.g., minimum, maximum), applied to all variables;
Vector{<:Union{SoleData.VarFeature,Base.Callable}}, # scalar conditions with ≥ and <, on an explicitly specified feature set (callables to be applied to each variable, or VarFeature objects);
Vector{<:Tuple{Base.Callable,Integer}}, # scalar conditions with ≥ and <, on a set of features specified as a set of callables to be applied to a set of variables each;
Vector{<:Tuple{TestOperator,<:Union{SoleData.VarFeature,Base.Callable}}}, # explicitly specify the pairs (test operator, feature);
Vector{<:SoleData.ScalarMetaCondition}, # explicitly specify the scalar condition set.
}
conditions :: Union{
Nothing, # defaults to scalar conditions (with ≥ and <) on well-known feature functions (e.g., minimum, maximum), applied to all variables;
Vector{<:Union{SoleData.VarFeature,Base.Callable}}, # scalar conditions with ≥ and <, on an explicitly specified feature set (callables to be applied to each variable, or VarFeature objects);
Vector{<:Tuple{Base.Callable,Integer}}, # scalar conditions with ≥ and <, on a set of features specified as a set of callables to be applied to a set of variables each;
Vector{<:Tuple{TestOperator,<:Union{SoleData.VarFeature,Base.Callable}}}, # explicitly specify the pairs (test operator, feature);
Vector{<:SoleData.ScalarMetaCondition}, # explicitly specify the scalar condition set.
}
# Type for the extracted feature values
featvaltype :: Type
# Initial conditions
initconditions :: Union{
Nothing, # defaults to standard conditions (e.g., start_without_world)
Symbol, # one of the initial conditions specified in AVAILABLE_INITIALCONDITIONS;
InitialCondition, # explicitly specify an initial condition for the learning algorithm.
}
## Miscellaneous
downsize :: Union{Bool,NTuple{N,Integer} where N,Function}
print_progress :: Bool
rng :: Union{Random.AbstractRNG,Integer}
## DecisionTree.jl parameters
display_depth :: Union{Nothing,Int}
min_samples_split :: Union{Nothing,Int}
n_subfeatures :: Union{Nothing,Int,Float64,Function}
post_prune :: Bool
merge_purity_threshold :: Union{Nothing,Float64}
feature_importance :: Symbol
end
# keyword constructor
"""
    ModalDecisionTree(; kwargs...)

Keyword constructor for `ModalDecisionTree`: the keyword arguments are passed
positionally (in field-declaration order) to the default constructor, after
which `MMI.clean!` validates and normalizes them, warning about any invalid
value.
"""
function ModalDecisionTree(;
    max_depth = nothing,
    min_samples_leaf = nothing,
    min_purity_increase = nothing,
    max_purity_at_leaf = nothing,
    max_modal_depth = nothing,
    #
    relations = nothing,
    features = nothing,
    conditions = nothing,
    featvaltype = Float64,
    initconditions = nothing,
    #
    downsize = true,
    print_progress = false,
    rng = Random.GLOBAL_RNG,
    #
    display_depth = nothing,
    min_samples_split = nothing,
    n_subfeatures = nothing,
    post_prune = false,
    merge_purity_threshold = nothing,
    feature_importance = :split,
)
    # NOTE: the positional order below must match the struct field declaration.
    model = ModalDecisionTree(
        max_depth,
        min_samples_leaf,
        min_purity_increase,
        max_purity_at_leaf,
        max_modal_depth,
        #
        relations,
        features,
        conditions,
        featvaltype,
        initconditions,
        #
        downsize,
        print_progress,
        rng,
        #
        display_depth,
        min_samples_split,
        n_subfeatures,
        post_prune,
        merge_purity_threshold,
        feature_importance,
    )
    message = MMI.clean!(model)
    isempty(message) || @warn message
    return model
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5476 | mutable struct ModalRandomForest <: MMI.Probabilistic
sampling_fraction :: Float64
ntrees :: Int
## Pruning conditions
max_depth :: Union{Nothing,Int}
min_samples_leaf :: Union{Nothing,Int}
min_purity_increase :: Union{Nothing,Float64}
max_purity_at_leaf :: Union{Nothing,Float64}
max_modal_depth :: Union{Nothing,Int}
## Logic parameters
# Relation set
relations :: Union{
Nothing, # defaults to a well-known relation set, depending on the data;
Symbol, # one of the relation sets specified in AVAILABLE_RELATIONS;
Vector{<:AbstractRelation}, # explicitly specify the relation set;
# Vector{<:Union{Symbol,Vector{<:AbstractRelation}}}, # MULTIMODAL CASE: specify a relation set for each modality;
Function # A function worldtype -> relation set.
}
# Condition set
features :: Union{
Nothing, # defaults to scalar conditions (with ≥ and <) on well-known feature functions (e.g., minimum, maximum), applied to all variables;
Vector{<:Union{SoleData.VarFeature,Base.Callable}}, # scalar conditions with ≥ and <, on an explicitly specified feature set (callables to be applied to each variable, or VarFeature objects);
Vector{<:Tuple{Base.Callable,Integer}}, # scalar conditions with ≥ and <, on a set of features specified as a set of callables to be applied to a set of variables each;
Vector{<:Tuple{TestOperator,<:Union{SoleData.VarFeature,Base.Callable}}}, # explicitly specify the pairs (test operator, feature);
Vector{<:SoleData.ScalarMetaCondition}, # explicitly specify the scalar condition set.
}
conditions :: Union{
Nothing, # defaults to scalar conditions (with ≥ and <) on well-known feature functions (e.g., minimum, maximum), applied to all variables;
Vector{<:Union{SoleData.VarFeature,Base.Callable}}, # scalar conditions with ≥ and <, on an explicitly specified feature set (callables to be applied to each variable, or VarFeature objects);
Vector{<:Tuple{Base.Callable,Integer}}, # scalar conditions with ≥ and <, on a set of features specified as a set of callables to be applied to a set of variables each;
Vector{<:Tuple{TestOperator,<:Union{SoleData.VarFeature,Base.Callable}}}, # explicitly specify the pairs (test operator, feature);
Vector{<:SoleData.ScalarMetaCondition}, # explicitly specify the scalar condition set.
}
# Type for the extracted feature values
featvaltype :: Type
# Initial conditions
initconditions :: Union{
Nothing, # defaults to standard conditions (e.g., start_without_world)
Symbol, # one of the initial conditions specified in AVAILABLE_INITIALCONDITIONS;
InitialCondition, # explicitly specify an initial condition for the learning algorithm.
}
## Miscellaneous
downsize :: Union{Bool,NTuple{N,Integer} where N,Function}
print_progress :: Bool
rng :: Union{Random.AbstractRNG,Integer}
## DecisionTree.jl parameters
display_depth :: Union{Nothing,Int}
min_samples_split :: Union{Nothing,Int}
n_subfeatures :: Union{Nothing,Int,Float64,Function}
post_prune :: Bool
merge_purity_threshold :: Union{Nothing,Float64}
feature_importance :: Symbol
end
# keyword constructor
"""
    ModalRandomForest(; kwargs...)

Keyword constructor for `ModalRandomForest`: the keyword arguments are passed
positionally (in field-declaration order) to the default constructor, after
which `MMI.clean!` validates and normalizes them, warning about any invalid
value. Progress printing defaults to on for ensembles with more than 50 trees.
"""
function ModalRandomForest(;
    sampling_fraction = 0.7,
    ntrees = 10,
    max_depth = nothing,
    min_samples_leaf = nothing,
    min_purity_increase = nothing,
    max_purity_at_leaf = nothing,
    max_modal_depth = nothing,
    #
    relations = nothing,
    features = nothing,
    conditions = nothing,
    featvaltype = Float64,
    initconditions = nothing,
    #
    downsize = true,
    print_progress = (ntrees > 50),
    rng = Random.GLOBAL_RNG,
    #
    display_depth = nothing,
    min_samples_split = nothing,
    n_subfeatures = nothing,
    post_prune = false,
    merge_purity_threshold = nothing,
    feature_importance = :split,
)
    # NOTE: the positional order below must match the struct field declaration.
    model = ModalRandomForest(
        sampling_fraction,
        ntrees,
        #
        max_depth,
        min_samples_leaf,
        min_purity_increase,
        max_purity_at_leaf,
        max_modal_depth,
        #
        relations,
        features,
        conditions,
        featvaltype,
        initconditions,
        #
        downsize,
        print_progress,
        rng,
        #
        display_depth,
        min_samples_split,
        n_subfeatures,
        post_prune,
        merge_purity_threshold,
        feature_importance,
    )
    message = MMI.clean!(model)
    isempty(message) || @warn message
    return model
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 11129 |
"""
    get_kwargs(m::SymbolicModel, X)

Translate the MLJ-level hyperparameters of `m` into the keyword arguments
accepted by the native learning functions (`MDT.build_tree`/`MDT.build_forest`).
Forest models additionally receive ensemble-specific parameters.
"""
function get_kwargs(m::SymbolicModel, X)
    base_kwargs = (;
        loss_function = nothing,
        max_depth = m.max_depth,
        min_samples_leaf = m.min_samples_leaf,
        min_purity_increase = m.min_purity_increase,
        max_purity_at_leaf = m.max_purity_at_leaf,
        max_modal_depth = m.max_modal_depth,
        ####################################################################################
        n_subrelations = identity,
        n_subfeatures = m.n_subfeatures,
        initconditions = readinitconditions(m, X),
        allow_global_splits = ALLOW_GLOBAL_SPLITS,
        ####################################################################################
        use_minification = false,
        perform_consistency_check = false,
        ####################################################################################
        rng = m.rng,
        print_progress = m.print_progress,
    )
    # Ensemble-only parameters (instance sampling fraction and ensemble size).
    additional_kwargs = begin
        if m isa TreeModel
            (;)
        elseif m isa ForestModel
            (;
                partial_sampling = m.sampling_fraction,
                ntrees = m.ntrees,
                suppress_parity_warning = true,
            )
        else
            error("Unexpected model type: $(typeof(m))")
        end
    end
    merge(base_kwargs, additional_kwargs)
end
"""
    MMI.clean!(m::SymbolicModel)

Validate and normalize the hyperparameters of `m` in place.

Invalid values are replaced by model-specific defaults, `nothing` placeholders
are resolved, and legacy encodings (e.g., `-1` meaning "no limit") are
canonicalized. Returns a warning string describing every correction applied
(empty when every parameter was already valid).
"""
function MMI.clean!(m::SymbolicModel)
    warning = ""

    # Select the default hyperparameter values for the concrete model type.
    if m isa TreeModel
        mlj_default_min_samples_leaf = mlj_mdt_default_min_samples_leaf
        mlj_default_min_purity_increase = mlj_mdt_default_min_purity_increase
        mlj_default_max_purity_at_leaf = mlj_mdt_default_max_purity_at_leaf
        mlj_default_n_subfeatures = mlj_mdt_default_n_subfeatures
    elseif m isa ForestModel
        mlj_default_min_samples_leaf = mlj_mrf_default_min_samples_leaf
        mlj_default_min_purity_increase = mlj_mrf_default_min_purity_increase
        mlj_default_max_purity_at_leaf = mlj_mrf_default_max_purity_at_leaf
        mlj_default_n_subfeatures = mlj_mrf_default_n_subfeatures
        mlj_default_ntrees = mlj_mrf_default_ntrees
        mlj_default_sampling_fraction = mlj_mrf_default_sampling_fraction
    else
        error("Unexpected model type: $(typeof(m))")
    end

    if !(isnothing(m.max_depth) || m.max_depth ≥ -1)
        warning *= "max_depth must be ≥ -1, but $(m.max_depth) " *
            "was provided. Defaulting to $(mlj_default_max_depth).\n"
        m.max_depth = mlj_default_max_depth
    end
    if !(isnothing(m.min_samples_leaf) || m.min_samples_leaf ≥ 1)
        warning *= "min_samples_leaf must be ≥ 1, but $(m.min_samples_leaf) " *
            "was provided. Defaulting to $(mlj_default_min_samples_leaf).\n"
        m.min_samples_leaf = mlj_default_min_samples_leaf
    end
    if !(isnothing(m.max_modal_depth) || m.max_modal_depth ≥ -1)
        warning *= "max_modal_depth must be ≥ -1, but $(m.max_modal_depth) " *
            "was provided. Defaulting to $(mlj_default_max_modal_depth).\n"
        # Fix: previously reset to `mlj_default_max_depth`, contradicting the
        # warning message just issued.
        m.max_modal_depth = mlj_default_max_modal_depth
    end

    # Patch parameters: -1 -> nothing (legacy encoding for "no limit")
    m.max_depth == -1 && (m.max_depth = nothing)
    m.max_modal_depth == -1 && (m.max_modal_depth = nothing)
    m.display_depth == -1 && (m.display_depth = nothing)

    # Patch parameters: nothing -> default value
    isnothing(m.max_depth) && (m.max_depth = mlj_default_max_depth)
    isnothing(m.min_samples_leaf) && (m.min_samples_leaf = mlj_default_min_samples_leaf)
    isnothing(m.min_purity_increase) && (m.min_purity_increase = mlj_default_min_purity_increase)
    isnothing(m.max_purity_at_leaf) && (m.max_purity_at_leaf = mlj_default_max_purity_at_leaf)
    isnothing(m.max_modal_depth) && (m.max_modal_depth = mlj_default_max_modal_depth)

    ########################################################################################
    ########################################################################################
    ########################################################################################

    if !(isnothing(m.relations) ||
        m.relations isa Symbol && m.relations in keys(AVAILABLE_RELATIONS) ||
        m.relations isa Vector{<:AbstractRelation} ||
        m.relations isa Function
    )
        warning *= "relations should be in $(collect(keys(AVAILABLE_RELATIONS))) " *
            "or a vector of SoleLogics.AbstractRelation's, " *
            "but $(m.relations) " *
            "was provided. Defaulting to $(mlj_default_relations_str).\n"
        m.relations = nothing
    end
    isnothing(m.relations) && (m.relations = mlj_default_relations)
    # (A no-op self-assignment for the Vector{<:AbstractRelation} case was removed here.)

    # Patch name: features -> conditions (the two hyperparameters are aliases;
    # only one of them may be specified).
    if !isnothing(m.features)
        if !isnothing(m.conditions)
            error("Please, only specify one hyper-parameter in `features` and `conditions`." *
                "Given: features = $(m.features) & conditions = $(m.conditions).")
        end
        m.conditions = m.features
        m.features = nothing
    end

    if !(isnothing(m.conditions) ||
        m.conditions isa Vector{<:Union{SoleData.VarFeature,Base.Callable}} ||
        m.conditions isa Vector{<:Tuple{Base.Callable,Integer}} ||
        m.conditions isa Vector{<:Tuple{TestOperator,<:Union{SoleData.VarFeature,Base.Callable}}} ||
        m.conditions isa Vector{<:SoleData.ScalarMetaCondition}
    )
        warning *= "conditions should be either:" *
            "a) a vector of features (i.e., callables to be associated to all variables, or SoleData.VarFeature objects);\n" *
            "b) a vector of tuples (callable,var_id);\n" *
            "c) a vector of tuples (test_operator,features);\n" *
            "d) a vector of SoleData.ScalarMetaCondition;\n" *
            "but $(m.conditions) " *
            "was provided. Defaulting to $(mlj_default_conditions_str).\n"
        m.conditions = nothing
    end
    isnothing(m.conditions) && (m.conditions = mlj_default_conditions)

    if !(isnothing(m.initconditions) ||
        m.initconditions isa Symbol && m.initconditions in keys(AVAILABLE_INITCONDITIONS) ||
        m.initconditions isa InitialCondition
    )
        warning *= "initconditions should be in $(collect(keys(AVAILABLE_INITCONDITIONS))), " *
            "but $(m.initconditions) " *
            "was provided. Defaulting to $(mlj_default_initconditions_str).\n"
        m.initconditions = nothing
    end
    isnothing(m.initconditions) && (m.initconditions = mlj_default_initconditions)

    ########################################################################################
    ########################################################################################
    ########################################################################################

    # Normalize `downsize` into a function (the instance-downsizing policy).
    m.downsize = begin
        if m.downsize == true
            make_downsizing_function(m)
        elseif m.downsize == false
            identity
        elseif m.downsize isa NTuple{N,Integer} where N
            make_downsizing_function(m.downsize)
        elseif m.downsize isa Function
            m.downsize
        else
            error("Unexpected value for `downsize` encountered: $(m.downsize)")
        end
    end

    # Integer seeds are promoted to a proper RNG for reproducibility.
    if m.rng isa Integer
        m.rng = Random.MersenneTwister(m.rng)
    end

    ########################################################################################
    ########################################################################################
    ########################################################################################

    if !(isnothing(m.min_samples_split) || m.min_samples_split ≥ 2)
        warning *= "min_samples_split must be ≥ 2, but $(m.min_samples_split) " *
            "was provided. Defaulting to $(nothing).\n"
        m.min_samples_split = nothing
    end

    # Note:
    # (min_samples_leaf * 2 > ninstances) || (min_samples_split > ninstances) ⇔
    # (max(min_samples_leaf * 2, min_samples_split) > ninstances) ⇔
    # (max(min_samples_leaf, div(min_samples_split, 2)) * 2 > ninstances)
    if !isnothing(m.min_samples_split)
        m.min_samples_leaf = max(m.min_samples_leaf, div(m.min_samples_split, 2))
    end

    if m.n_subfeatures isa Integer && !(m.n_subfeatures > 0)
        warning *= "n_subfeatures must be > 0, but $(m.n_subfeatures) " *
            "was provided. Defaulting to $(nothing).\n"
        m.n_subfeatures = nothing
    end

    # Legacy behaviour (DecisionTree.jl encodings)
    m.n_subfeatures == -1 && (m.n_subfeatures = sqrt_f)
    m.n_subfeatures == 0 && (m.n_subfeatures = identity)

    # Normalize `n_subfeatures` into a function mapping the number of available
    # features to the number to explore at each split. The closure appends to
    # `warning` in the enclosing scope.
    function make_n_subfeatures_function(n_subfeatures)
        if isnothing(n_subfeatures)
            mlj_default_n_subfeatures
        elseif n_subfeatures isa Integer
            warning *= "An absolute n_subfeatures was provided $(n_subfeatures). " *
                "It is recommended to use relative values (between 0 and 1), interpreted " *
                "as the share of the random portion of feature space explored at each split."
            x -> convert(Int64, n_subfeatures)
        elseif n_subfeatures isa AbstractFloat
            @assert 0 ≤ n_subfeatures ≤ 1 "Unexpected value for " *
                "n_subfeatures: $(n_subfeatures). It should be ∈ [0,1]"
            x -> ceil(Int64, x*n_subfeatures)
        elseif n_subfeatures isa Function
            # x -> ceil(Int64, n_subfeatures(x)) # Generates too much nesting
            n_subfeatures
        else
            error("Unexpected value for n_subfeatures: $(n_subfeatures) " *
                "(type: $(typeof(n_subfeatures)))")
        end
    end
    m.n_subfeatures = make_n_subfeatures_function(m.n_subfeatures)

    # Only true for classification:
    # if !(0 ≤ m.merge_purity_threshold ≤ 1)
    #     warning *= "merge_purity_threshold should be between 0 and 1, " *
    #         "but $(m.merge_purity_threshold) " *
    #         "was provided.\n"
    # end

    if m.feature_importance == :impurity
        warning *= "feature_importance = :impurity is currently not supported." *
            "Defaulting to $(:split).\n"
        # Fix: this was a comparison (`==`) instead of an assignment, so the
        # promised fallback to :split never happened (and the next check then
        # emitted a second, spurious warning).
        m.feature_importance = :split
    end
    if !(m.feature_importance in [:split])
        warning *= "feature_importance should be in [:split], " *
            "but $(m.feature_importance) " *
            "was provided.\n"
    end

    if m isa ForestModel
        isnothing(m.sampling_fraction) && (m.sampling_fraction = mlj_default_sampling_fraction)
        if !(0 ≤ m.sampling_fraction ≤ 1)
            warning *= "sampling_fraction should be ∈ [0,1], " *
                "but $(m.sampling_fraction) " *
                "was provided.\n"
        end
        isnothing(m.ntrees) && (m.ntrees = mlj_default_ntrees)
        if !(m.ntrees > 0)
            warning *= "ntrees should be > 0, " *
                "but $(m.ntrees) " *
                "was provided.\n"
        end
    end

    return warning
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 7146 | using SoleData.DimensionalDatasets
using SoleData.DimensionalDatasets: UniformFullDimensionalLogiset
using SoleData: ScalarOneStepMemoset, AbstractFullMemoset
using SoleData: naturalconditions
# Whether global (modality-wide) splits are allowed during learning.
const ALLOW_GLOBAL_SPLITS = true
# Default depth limits (nothing = no limit).
const mlj_default_max_depth = nothing
const mlj_default_max_modal_depth = nothing
# Default pruning parameters for single decision trees (mdt)...
const mlj_mdt_default_min_samples_leaf = 4
const mlj_mdt_default_min_purity_increase = 0.002
const mlj_mdt_default_max_purity_at_leaf = Inf
const mlj_mdt_default_n_subfeatures = identity
# ...and for random forests (mrf).
const mlj_mrf_default_min_samples_leaf = 1
const mlj_mrf_default_min_purity_increase = -Inf
const mlj_mrf_default_max_purity_at_leaf = Inf
const mlj_mrf_default_ntrees = 50
# Ceil'd square root of `x`; used as the forests' default feature-subsampling
# heuristic (explore ⌈√p⌉ of p features at each split).
function sqrt_f(x)
    return ceil(Int, sqrt(x))
end
# Forest defaults: sample ⌈√p⌉ of the p features at each split, and train each
# tree on a 70% fraction of the instances.
const mlj_mrf_default_n_subfeatures = sqrt_f
const mlj_mrf_default_sampling_fraction = 0.7
# Map from relation-set name to a function (dimensionality -> relation vector).
# Interval-algebra sets only support dimensionality 1 and 2.
AVAILABLE_RELATIONS = OrderedDict{Symbol,Function}([
    :none => (d)->AbstractRelation[],
    :IA => (d)->[globalrel, (d == 1 ? SoleLogics.IARelations : (d == 2 ? SoleLogics.IA2DRelations : error("Unexpected dimensionality ($d).")))...],
    :IA3 => (d)->[globalrel, (d == 1 ? SoleLogics.IA3Relations : (d == 2 ? SoleLogics.IA32DRelations : error("Unexpected dimensionality ($d).")))...],
    :IA7 => (d)->[globalrel, (d == 1 ? SoleLogics.IA7Relations : (d == 2 ? SoleLogics.IA72DRelations : error("Unexpected dimensionality ($d).")))...],
    :RCC5 => (d)->[globalrel, SoleLogics.RCC5Relations...],
    :RCC8 => (d)->[globalrel, SoleLogics.RCC8Relations...],
])
# Default relation hyperparameter (nothing = infer from the dataset) and its
# human-readable description, used in warning messages.
mlj_default_relations = nothing
mlj_default_relations_str = "either no relation (adimensional data), " *
    "IA7 interval relations (1- and 2-dimensional data)."
# , or RCC5 relations " *
# "(2-dimensional data)."
"""
    defaultrelations(dataset, relations)

Return the default relation set for `dataset`. Precomputed (one-step memoized)
logisets carry their own relation set; otherwise the set is inferred from the
dataset's dimensionality (none for dimensionality 0, IA7 interval relations for
1 and 2), unless `relations` names a set explicitly.
"""
function defaultrelations(dataset, relations)
    # @show typeof(dataset)
    if dataset isa Union{
        SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}} where {W,U,FT,FR,L,N},
        SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}} where {W,U,FT,FR,L,N},
    }
        # Precomputed logisets carry their own relation set.
        if relations == mlj_default_relations
            MDT.relations(dataset)
        else
            # NOTE(review): this branch fires when non-default relations are
            # requested together with a precomputed logiset, yet the message
            # mentions the dataset type — confirm intended wording.
            error("Unexpected dataset type: $(typeof(dataset)).")
        end
    else
        # Resolve the symbolic relation-set name (or infer one from the
        # dataset's dimensionality).
        symb = begin
            if relations isa Symbol
                relations
            elseif dimensionality(dataset) == 0
                :none
            elseif dimensionality(dataset) == 1
                :IA7
            elseif dimensionality(dataset) == 2
                :IA7
                # :RCC8
            else
                error("Cannot infer relation set for dimensionality $(repr(dimensionality(dataset))). " *
                    "Dimensionality should be 0, 1 or 2.")
            end
        end
        d = dimensionality(dataset)
        if d == 0
            AVAILABLE_RELATIONS[:none](d)
        else
            AVAILABLE_RELATIONS[symb](d)
        end
    end
end
# Infer relation set from model.relations parameter and the (unimodal) dataset.
"""
    readrelations(model, dataset)

Resolve the `relations` hyperparameter of `model` against `dataset`: defaults
and symbolic names go through `defaultrelations`; a user-supplied function is
applied to the dataset, and, for precomputed logisets, its result must be a
subset of the dataset's own relations.
"""
function readrelations(model, dataset)
    if model.relations == mlj_default_relations || model.relations isa Symbol
        defaultrelations(dataset, model.relations)
    else
        if dataset isa Union{
            SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}} where {W,U,FT,FR,L,N},
            SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}} where {W,U,FT,FR,L,N},
        }
            rels = model.relations(dataset)
            @assert issubset(rels, MDT.relations(dataset)) "Could not find " *
                "specified relations $(SoleLogics.displaysyntaxvector(rels)) in " *
                "logiset relations $(SoleLogics.displaysyntaxvector(MDT.relations(dataset)))."
            rels
        else
            model.relations(dataset)
        end
    end
end
# Sentinel value for the `conditions` model hyper-parameter:
# `nothing` means "infer the conditions from the dataset" (see `defaultconditions`).
mlj_default_conditions = nothing

# Human-readable description of the default, interpolated into model docstrings.
mlj_default_conditions_str = "scalar conditions (test operators ≥ and <) " *
    "on either minimum and maximum feature functions (if dimensional data is provided), " *
    "or the features of the logiset, if one is provided."
"""
    defaultconditions(dataset)

Return the default (meta)conditions for `dataset`:
- logisets with precomputed one-step memoization: the metaconditions fixed therein;
- uniform full-dimensional logisets: `≥`-metaconditions (plus `<`-metaconditions,
  unless every instance has a single world) on each feature;
- otherwise: feature functions (`identity` when every instance has a single world,
  `minimum`/`maximum` in the general case).
"""
function defaultconditions(dataset)
    if dataset isa Union{
        SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}} where {W,U,FT,FR,L,N},
        SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}} where {W,U,FT,FR,L,N},
    }
        MDT.metaconditions(dataset)
    elseif dataset isa UniformFullDimensionalLogiset
        # Hoisted out of the comprehension below: this check does not depend on
        # the feature, so it used to be needlessly recomputed for every feature.
        is_propositional = all(i_instance->SoleData.nworlds(frame(dataset, i_instance)) == 1, 1:ninstances(dataset))
        vcat([
            [
                ScalarMetaCondition(feature, ≥),
                (is_propositional ?
                    [] :
                    [ScalarMetaCondition(feature, <)]
                )...
            ]
        for feature in features(dataset)]...)
    else
        if all(i_instance->SoleData.nworlds(frame(dataset, i_instance)) == 1, 1:ninstances(dataset))
            [identity]
        else
            [minimum, maximum]
        end
    end
end
"""
    readconditions(model, dataset)

Resolve `model.conditions` to the metaconditions to be used on `dataset`,
falling back to `defaultconditions` when the hyper-parameter is unspecified.
"""
function readconditions(model, dataset)
    conditions = begin
        if model.conditions == mlj_default_conditions
            defaultconditions(dataset)
        else
            model.conditions
        end
    end
    if dataset isa Union{
        SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset}} where {W,U,FT,FR,L,N},
        SupportedLogiset{W,U,FT,FR,L,N,<:Tuple{<:ScalarOneStepMemoset,<:AbstractFullMemoset}} where {W,U,FT,FR,L,N},
    }
        # With precomputed one-step memoization, only (a subset of) the memoized
        # metaconditions can be used.
        @assert issubset(conditions, MDT.metaconditions(dataset)) "Could not find " *
            "specified conditions $(SoleLogics.displaysyntaxvector(conditions)) in " *
            "logiset metaconditions $(SoleLogics.displaysyntaxvector(MDT.metaconditions(dataset)))."
        conditions
    else
        # @show typeof(dataset)
        # `conditions` may be plain feature functions here; turn them into
        # scalar metaconditions suited to the dataset.
        naturalconditions(dataset, conditions, model.featvaltype)
    end
end
# Sentinel value for the `initconditions` model hyper-parameter:
# `nothing` means "infer the initial condition from the dataset" (see `readinitconditions`).
mlj_default_initconditions = nothing

# Human-readable description of the default, interpolated into model docstrings.
mlj_default_initconditions_str = "" *
    ":start_with_global" # (i.e., starting with a global decision, such as ⟨G⟩ min(V1) > 2) " *
    # "for 1-dimensional data and :start_at_center for 2-dimensional data."

# Maps user-facing symbols to the corresponding `InitialCondition` values.
AVAILABLE_INITCONDITIONS = OrderedDict{Symbol,InitialCondition}([
    :start_with_global => MDT.start_without_world,
    :start_at_center => MDT.start_at_center,
])
"""
    readinitconditions(model, dataset)

Resolve `model.initconditions` to `InitialCondition` value(s) for `dataset`
(one per modality, when the dataset is multimodal).
"""
function readinitconditions(model, dataset)
    if SoleData.ismultilogiseed(dataset)
        # Multimodal case: resolve per modality.
        map(mod->readinitconditions(model, mod), eachmodality(dataset))
    else
        if model.initconditions == mlj_default_initconditions
            # d = dimensionality(SoleData.base(dataset)) # ? TODO maybe remove base for AbstractModalLogiset's?
            d = dimensionality(frame(dataset, 1))
            # All supported dimensionalities (0, 1 and 2) share the same default;
            # the previously-redundant per-dimensionality branches were collapsed.
            if 0 <= d <= 2
                AVAILABLE_INITCONDITIONS[:start_with_global]
            else
                error("Unexpected dimensionality: $(d)")
            end
        else
            AVAILABLE_INITCONDITIONS[model.initconditions]
        end
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 17017 |
# # DOCUMENT STRINGS
# "The model is probabilistic, symbolic model " *
# "for classification and regression tasks with dimensional data " *
# "(e.g., images and time-series)." *
# Shared description paragraph, interpolated into the model docstrings below.
descr = """
The symbolic, probabilistic model is able to extract logical descriptions of the data
in terms of logical formulas
(see [SoleLogics.jl](https://github.com/aclai-lab/SoleLogics.jl)) on atoms that are
scalar conditions on the variables (or features);
for example, min[V2] ≥ 10, that is, "the minimum of variable 2 is not less than 10".
As such, the model is suitable for tasks that involve non-scalar data,
but require some level of interpretable and transparent modeling.
At the moment, the only loss functions available are Shannon's entropy (classification) and variance (regression).
"""
# TODO link in docstring?
# [SoleLogics.jl](https://github.com/aclai-lab/SoleLogics.jl)) on atoms that are
# Literature reference for the modal decision tree algorithm, used in docstrings.
const MDT_ref = "" *
    "Manzella et al. (2021). \"Interval Temporal Random Forests with an " *
    "Application to COVID-19 Diagnosis\". 10.4230/LIPIcs.TIME.2021.7"

# Literature reference for the random forest algorithm, used in docstrings.
const DOC_RANDOM_FOREST = "[Random Forest algorithm]" *
    "(https://en.wikipedia.org/wiki/Random_forest), originally published in " *
    "Breiman, L. (2001): \"Random Forests.\", *Machine Learning*, vol. 45, pp. 5–32"
"""
    docstring_piece_1(T::Type)

Build the model-specific portion of the docstrings shared by
`ModalDecisionTree` and `ModalRandomForest` (defaults and a few
hyper-parameter descriptions differ between the two model types).
"""
function docstring_piece_1(
    T::Type
)
    if T <: ModalDecisionTree
        default_min_samples_leaf = mlj_mdt_default_min_samples_leaf
        default_min_purity_increase = mlj_mdt_default_min_purity_increase
        default_max_purity_at_leaf = mlj_mdt_default_max_purity_at_leaf
        forest_hyperparams_str = ""
        n_subfeatures_str =
            """
            - `n_subfeatures=0`: Number of features to select at random at each node (0 for all),
            or a Function that outputs this number, given a number of available features.
            """
    elseif T <: ModalRandomForest
        default_min_samples_leaf = mlj_mrf_default_min_samples_leaf
        default_min_purity_increase = mlj_mrf_default_min_purity_increase
        default_max_purity_at_leaf = mlj_mrf_default_max_purity_at_leaf
        forest_hyperparams_str =
            """
            - `n_trees=10`: number of trees to train
            - `sampling_fraction=0.7` fraction of samples to train each tree on
            """
        n_subfeatures_str =
            """
            - `n_subfeatures`: Number of features to select at random at each node (0 for all),
            or a Function that outputs this number, given a number of available features.
            Defaulted to `ceil(Int, sqrt(x))`.
            """
    else
        # Fix: this message used to interpolate an undefined variable `m`
        # (`$(typeof(m))`), which would raise an UndefVarError instead of the
        # intended error; the function's parameter is `T`.
        error("Unexpected model type: $(T)")
    end
    """
    Modal C4.5. This classification and regression algorithm, originally presented in $MDT_ref,
    is an extension of the CART and C4.5
    [decision tree learning algorithms](https://en.wikipedia.org/wiki/Decision_tree_learning)
    that leverages the expressive power of modal logics of time and space
    to perform temporal/spatial reasoning on non-scalar data, such as time-series and images.
    $(descr)
    # Training data
    In MLJ or MLJBase, bind an instance `model` to data with
    mach = machine(model, X, y)
    where
    - `X`: any table of input features (e.g., a `DataFrame`) whose columns
    each have one of the following element scitypes: `Continuous`,
    `Count`, `OrderedFactor`, or any 0-, 1-, 2-dimensional array with elements
    of these scitypes; check column scitypes with `schema(X)`
    - `y`: is the target, which can be any `AbstractVector` whose element
    scitype is `Multiclass`, `Continuous`, `Finite`, or `Textual`; check the scitype
    with `scitype(y)`
    Train the machine with `fit!(mach)`.
    # Hyper-parameters
    $(forest_hyperparams_str)
    - `max_depth=-1`: Maximum depth of the decision tree (-1=any)
    - `min_samples_leaf=$(default_min_samples_leaf)`: Minimum number of samples required at each leaf
    - `min_purity_increase=$(default_min_purity_increase)`: Minimum value for the loss function needed for a split
    - `max_purity_at_leaf=$(default_max_purity_at_leaf)`: Minimum value for the loss function needed for a split
    - `max_modal_depth=-1`: Maximum modal depth of the decision tree (-1=any). When this depth is reached, only propositional decisions are taken.
    $(n_subfeatures_str)
    - `feature=[minimum, maximum]` Feature functions to be used by the tree to mine scalar conditions (e.g., `minimum[V2] ≥ 10`)
    - `featvaltype=Float64` Output type for feature functions, when it cannot be inferred (e.g., with custom feature functions provided).
    - `relations=nothing` Relations that the model uses to look for patterns; it can be a symbol in [:IA, :IA3, :IA7, :RCC5, :RCC8],
    where :IA stands for [Allen's Interval Algebra](https://en.wikipedia.org/wiki/Allen%27s_interval_algebra) (13 relations in 1D, 169 relations in 2D),
    :IA3 and :IA7 are [coarser fragments with 3 and 7 relations, respectively](https://www.sciencedirect.com/science/article/pii/S0004370218305964),
    :RCC5 and :RCC8 are [Region Connection Calculus algebras](https://en.wikipedia.org/wiki/Region_connection_calculus) with 5 and 8 topological operators, respectively.
    Relations from :IA, :IA3, :IA7, capture directional aspects of the relative arrangement of two intervals in time (or rectangles in a 2D space),
    while relations from :RCC5 and :RCC8 only capture topological aspects and are therefore rotation and flip-invariant.
    This hyper-parameter defaults to $(mlj_default_relations_str).
    - `initconditions=nothing` initial conditions for evaluating modal decisions at the root; it can be a symbol in [:start_with_global, :start_at_center].
    :start_with_global forces the first decision to be a *global* decision (e.g., `⟨G⟩ (minimum[V2] ≥ 10)`, which translates to "there exists a region where the minimum of variable 2 is higher than 10").
    :start_at_center forces the first decision to be evaluated on the smallest central world, that is, the central value of a time-series, or the central pixel of an image.
    This hyper-parameter defaults to $(mlj_default_initconditions_str).
    - `downsize=true` Whether to perform automatic downsizing, by means of moving average. In fact, this algorithm has high complexity
    (both time and space), and can only handle small time-series (< 100 points) & small images (< 10 x 10 pixels).
    When set to `true`, automatic downsizing is performed; when it is an `NTuple` of `Integer`s, a downsizing of dimensional data
    to match that size is performed.
    - `print_progress=false`: set to `true` for a progress bar
    - `post_prune=false`: set to `true` for post-fit pruning
    - `merge_purity_threshold=1.0`: (post-pruning) merge leaves having
    combined purity `>= merge_purity_threshold`
    - `display_depth=5`: max depth to show when displaying the tree(s)
    - `rng=Random.GLOBAL_RNG`: random number generator or seed
    """
end
"""
$(MMI.doc_header(ModalDecisionTree))
`ModalDecisionTree` implements
$(docstring_piece_1(ModalDecisionTree))
- `display_depth=5`: max depth to show when displaying the tree
# Operations
- `predict(mach, Xnew)`: return predictions of the target given
features `Xnew` having the same scitype as `X` above.
# Fitted parameters
The fields of `fitted_params(mach)` are:
- `model`: the tree object, as returned by the core algorithm
- `var_grouping`: the adopted grouping of the features encountered in training, in an order consistent with the output of `printmodel`.
The MLJ interface can currently deal with scalar, temporal and spatial features, but
has one limitation, and one tricky procedure for handling them at the same time.
The limitation is for temporal and spatial features to be uniform in size across the instances (the algorithm will automatically throw away features that do not satisfy this constraint).
As for the tricky procedure: before the learning phase, features are divided into groups (referred to as `modalities`) according to each variable's `channel size`, that is, the size of the vector or matrix.
For example, if X is multimodal, and has three temporal features :x, :y, :z with 10, 10 and 20 points, respectively,
plus three spatial features :R, :G, :B, with the same size 5 × 5 pixels, the algorithm assumes that :x and :y share a temporal axis,
:R, :G, :B share two spatial axis, while :z does not share any axis with any other variable. As a result,
the model will group features into three modalities:
- {1} [:x, :y]
- {2} [:z]
- {3} [:R, :G, :B]
and `var_grouping` will be [["x", "y"], ["z"], ["R", "G", "B"]].
# Report
The fields of `report(mach)` are:
- `printmodel`: method to print a pretty representation of the fitted
model, with single argument the tree depth. The interpretation of the tree requires you
to understand how the current MLJ interface of ModalDecisionTrees.jl handles features of different modalities.
See `var_grouping` above. Note that the split conditions (or decisions) in the tree are relativized to a specific modality, of which the number is shown.
- `var_grouping`: the adopted grouping of the features encountered in training, in an order consistent with the output of `printmodel`.
See `var_grouping` above.
- `feature_importance_by_count`: a simple count of each of the occurrences of the features across the model, in an order consistent with `var_grouping`.
- `classes_seen`: list of target classes actually observed in training.
# Examples
```julia
using MLJ
using ModalDecisionTrees
using Random
tree = ModalDecisionTree(min_samples_leaf=4)
# Load an example dataset (a temporal one)
X, y = ModalDecisionTrees.load_japanesevowels()
N = length(y)
mach = machine(tree, X, y)
# Split dataset
p = randperm(N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
# Fit
fit!(mach, rows=train_idxs)
# Perform predictions, compute accuracy
yhat = predict_mode(mach, X[test_idxs,:])
accuracy = MLJ.accuracy(yhat, y[test_idxs])
# Access raw model
fitted_params(mach).model
report(mach).printmodel(3)
"{1} ⟨G⟩ (max[coefficient1] <= 0.883491) 3 : 91/512 (conf = 0.1777)
✔ {1} ⟨G⟩ (max[coefficient9] <= -0.157292) 3 : 89/287 (conf = 0.3101)
│✔ {1} ⟨L̅⟩ (max[coefficient6] <= -0.504503) 3 : 89/209 (conf = 0.4258)
││✔ {1} ⟨A⟩ (max[coefficient3] <= 0.220312) 3 : 81/93 (conf = 0.8710)
[...]
││✘ {1} ⟨L̅⟩ (max[coefficient1] <= 0.493004) 8 : 47/116 (conf = 0.4052)
[...]
│✘ {1} ⟨A⟩ (max[coefficient2] <= -0.285645) 7 : 41/78 (conf = 0.5256)
│ ✔ {1} min[coefficient3] >= 0.002931 4 : 34/36 (conf = 0.9444)
[...]
│ ✘ {1} ⟨G⟩ (min[coefficient5] >= 0.18312) 7 : 39/42 (conf = 0.9286)
[...]
✘ {1} ⟨G⟩ (max[coefficient3] <= 0.006087) 5 : 51/225 (conf = 0.2267)
✔ {1} ⟨D⟩ (max[coefficient2] <= -0.301233) 5 : 51/102 (conf = 0.5000)
│✔ {1} ⟨D̅⟩ (max[coefficient3] <= -0.123654) 5 : 51/65 (conf = 0.7846)
[...]
│✘ {1} ⟨G⟩ (max[coefficient9] <= -0.146962) 7 : 16/37 (conf = 0.4324)
[...]
✘ {1} ⟨G⟩ (max[coefficient9] <= -0.424346) 1 : 47/123 (conf = 0.3821)
✔ {1} min[coefficient1] >= 1.181048 6 : 39/40 (conf = 0.9750)
[...]
✘ {1} ⟨G⟩ (min[coefficient4] >= -0.472485) 1 : 47/83 (conf = 0.5663)
[...]"
```
"""
ModalDecisionTree
"""
$(MMI.doc_header(ModalRandomForest))
`ModalRandomForest` implements the standard $DOC_RANDOM_FOREST, based on
$(docstring_piece_1(ModalRandomForest))
- `n_subrelations=identity` Number of relations to randomly select at any point of the tree. Must be a function of the number of the available relations. It defaults to `identity`, that is, consider all available relations.
- `n_subfeatures=x -> ceil(Int64, sqrt(x))` Number of functions to randomly select at any point of the tree. Must be a function of the number of the available functions. It defaults to `x -> ceil(Int64, sqrt(x))`, that is, consider only about square root of the available functions.
- `ntrees=$(mlj_mrf_default_ntrees)` Number of trees in the forest.
- `sampling_fraction=0.7` Fraction of samples to train each tree on.
- `rng=Random.GLOBAL_RNG` Random number generator or seed.
# Operations
- `predict(mach, Xnew)`: return predictions of the target given
features `Xnew` having the same scitype as `X` above. Predictions
are probabilistic, but uncalibrated.
- `predict_mode(mach, Xnew)`: instead return the mode of each
prediction above.
# Fitted parameters
The fields of `fitted_params(mach)` are:
- `model`: the forest object, as returned by the core algorithm
- `var_grouping`: the adopted grouping of the features encountered in training, in an order consistent with the output of `printmodel`.
The MLJ interface can currently deal with scalar, temporal and spatial features, but
has one limitation, and one tricky procedure for handling them at the same time.
The limitation is for temporal and spatial features to be uniform in size across the instances (the algorithm will automatically throw away features that do not satisfy this constraint).
As for the tricky procedure: before the learning phase, features are divided into groups (referred to as `modalities`) according to each variable's `channel size`, that is, the size of the vector or matrix.
For example, if X is multimodal, and has three temporal features :x, :y, :z with 10, 10 and 20 points, respectively,
plus three spatial features :R, :G, :B, with the same size 5 × 5 pixels, the algorithm assumes that :x and :y share a temporal axis,
:R, :G, :B share two spatial axis, while :z does not share any axis with any other variable. As a result,
the model will group features into three modalities:
- {1} [:x, :y]
- {2} [:z]
- {3} [:R, :G, :B]
and `var_grouping` will be [["x", "y"], ["z"], ["R", "G", "B"]].
# Report
The fields of `report(mach)` are:
- `printmodel`: method to print a pretty representation of the fitted
model, with single argument the depth of the trees. The interpretation of the tree requires you
to understand how the current MLJ interface of ModalDecisionTrees.jl handles features of different modalities.
See `var_grouping` above. Note that the split conditions (or decisions) in the tree are relativized to a specific frame, of which the number is shown.
- `var_grouping`: the adopted grouping of the features encountered in training, in an order consistent with the output of `printmodel`.
See `var_grouping` above.
- `feature_importance_by_count`: a simple count of each of the occurrences of the features across the model, in an order consistent with `var_grouping`.
- `classes_seen`: list of target classes actually observed in training.
# Examples
```julia
using MLJ
using ModalDecisionTrees
using Random
forest = ModalRandomForest(ntrees = 50)
# Load an example dataset (a temporal one)
X, y = ModalDecisionTrees.load_japanesevowels()
N = length(y)
mach = machine(forest, X, y)
# Split dataset
p = randperm(N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
# Fit
fit!(mach, rows=train_idxs)
# Perform predictions, compute accuracy
Xnew = X[test_idxs,:]
ynew = predict_mode(mach, Xnew) # point predictions
accuracy = MLJ.accuracy(ynew, y[test_idxs])
yhat = predict(mach, Xnew) # probabilistic predictions
pdf.(yhat, "1") # probabilities for one of the classes ("1")
# Access raw model
fitted_params(mach).model
report(mach).printmodel(3) # Note that the output here can be quite large.
```
"""
ModalRandomForest
# # Examples
# ```
# using MLJ
# MDT = @load DecisionTreeRegressor pkg=ModalDecisionTrees
# tree = MDT(max_depth=4, min_samples_split=3)
# X, y = make_regression(100, 2) # synthetic data
# mach = machine(tree, X, y) |> fit!
# Xnew, _ = make_regression(3, 2)
# yhat = predict(mach, Xnew) # new predictions
# fitted_params(mach).model # raw tree or stump object from DecisionTree.jl
# ```
# See also
# [DecisionTree.jl](https://github.com/JuliaAI/DecisionTree.jl) and
# the unwrapped model type
# [MLJDecisionTreeInterface.DecisionTree.DecisionTreeRegressor](@ref).
# """
# DecisionTreeRegressor
# # Examples
# ```
# using MLJ
# Forest = @load RandomForestRegressor pkg=ModalDecisionTrees
# forest = Forest(max_depth=4, min_samples_split=3)
# X, y = make_regression(100, 2) # synthetic data
# mach = machine(forest, X, y) |> fit!
# Xnew, _ = make_regression(3, 2)
# yhat = predict(mach, Xnew) # new predictions
# fitted_params(mach).forest # raw `Ensemble` object from DecisionTree.jl
# ```
# See also
# [DecisionTree.jl](https://github.com/JuliaAI/DecisionTree.jl) and
# the unwrapped model type
# [MLJDecisionTreeInterface.DecisionTree.RandomForestRegressor](@ref).
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5838 | using StatsBase
using StatsBase: mean
using SoleBase: movingwindow
using SoleData: AbstractDimensionalDataset
DOWNSIZE_MSG = "If this process gets killed, please downsize your dataset beforehand."
# Build a downsizer for a user-specified target channel size: every instance
# is downsized (via moving average) to exactly `channelsize`.
function make_downsizing_function(channelsize::NTuple)
    return instance->moving_average(instance, channelsize)
end
# Build the automatic downsizer used by tree models: time-series are capped at
# 100/150 points, images at 7×7/10×10 pixels (the stricter cap applies when
# there are more than 30 variables).
function make_downsizing_function(::TreeModel)
    function downsize(instance)
        chsize = MultiData.instance_channelsize(instance)
        nvars = MultiData.instance_nvariables(instance)
        if length(chsize) == 1
            npoints = first(chsize)
            if nvars > 30 && npoints > 100
                instance = moving_average(instance, 100)
            elseif npoints > 150
                instance = moving_average(instance, 150)
            end
        elseif length(chsize) == 2
            if nvars > 30 && prod(chsize) > prod((7,7))
                instance = moving_average(instance, min.(chsize, (7,7)))
            elseif prod(chsize) > prod((10,10))
                instance = moving_average(instance, min.(chsize, (10,10)))
            end
        end
        return instance
    end
end
# Build the automatic downsizer used by forest models: same scheme as for
# trees, but with tighter image caps (4×4/7×7 pixels), since forests train
# many trees.
function make_downsizing_function(::ForestModel)
    function downsize(instance)
        chsize = MultiData.instance_channelsize(instance)
        nvars = MultiData.instance_nvariables(instance)
        if length(chsize) == 1
            npoints = first(chsize)
            if nvars > 30 && npoints > 100
                instance = moving_average(instance, 100)
            elseif npoints > 150
                instance = moving_average(instance, 150)
            end
        elseif length(chsize) == 2
            if nvars > 30 && prod(chsize) > prod((4,4))
                instance = moving_average(instance, min.(chsize, (4,4)))
            elseif prod(chsize) > prod((7,7))
                instance = moving_average(instance, min.(chsize, (7,7)))
            end
        end
        return instance
    end
end
# TODO move to MultiData/SoleData
# Type-aware mean helpers: compute the mean of `vals` and convert it to the
# requested output type.
_mean(::Type{T}, vals::AbstractArray{T}) where {T<:Number} = StatsBase.mean(vals)
# Integer values, floating-point target: plain conversion of the mean.
_mean(::Type{T1}, vals::AbstractArray{T2}) where {T1<:AbstractFloat,T2<:Integer} = T1(StatsBase.mean(vals))
# Floating-point values, integer target: round the mean to the nearest integer.
_mean(::Type{T1}, vals::AbstractArray{T2}) where {T1<:Integer,T2<:AbstractFloat} = round(T1, StatsBase.mean(vals))
# # 1D
# function moving_average(
# instance::AbstractArray{T,1};
# kwargs...
# ) where {T<:Union{Nothing,Number}}
# npoints = length(instance)
# return [_mean(T, instance[idxs]) for idxs in movingwindow(npoints; kwargs...)]
# end
# # 1D
# function moving_average(
# instance::AbstractArray{T,1},
# nwindows::Integer,
# relative_overlap::AbstractFloat = .5,
# ) where {T<:Union{Nothing,Number}}
# npoints = length(instance)
# return [_mean(T, instance[idxs]) for idxs in movingwindow(npoints; nwindows = nwindows, relative_overlap = relative_overlap)]
# end
# Moving average of a 1-dimensional instance (points × variables): each
# variable is averaged over `nwindows` (possibly overlapping) windows along
# the point axis.
function moving_average(
    instance::AbstractArray{T,2},
    nwindows::Union{Integer,Tuple{Integer}},
    relative_overlap::AbstractFloat = .5,
) where {T<:Union{Nothing,Number}}
    nw = nwindows isa Tuple{<:Integer} ? first(nwindows) : nwindows
    npoints, nvariables = size(instance)
    out = similar(instance, (nw, nvariables))
    for v in 1:nvariables
        out[:, v] .= [_mean(T, instance[idxs, v]) for idxs in movingwindow(npoints; nwindows = nw, relative_overlap = relative_overlap)]
    end
    return out
end
# Moving average of a 2-dimensional instance (X × Y × variables): each variable
# channel is averaged over a `new_channelsize` grid of (possibly overlapping)
# windows.
function moving_average(
    instance::AbstractArray{T,3},
    new_channelsize::Tuple{Integer,Integer},
    relative_overlap::AbstractFloat = .5,
) where {T<:Union{Nothing,Number}}
    n_x, n_y, nvariables = size(instance)
    xwindows = movingwindow(n_x; nwindows = new_channelsize[1], relative_overlap = relative_overlap)
    ywindows = movingwindow(n_y; nwindows = new_channelsize[2], relative_overlap = relative_overlap)
    out = similar(instance, (new_channelsize..., nvariables))
    for v in 1:nvariables
        out[:, :, v] .= [_mean(T, instance[ix, iy, v]) for ix in xwindows, iy in ywindows]
    end
    return out
end
# Apply `moving_average` instance-wise over a whole dimensional dataset.
function moving_average(dataset::AbstractDimensionalDataset, args...; kwargs...)
    return [moving_average(instance, args...; kwargs...) for instance in eachinstance(dataset)]
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 719 | function compute_featureimportance(model, var_grouping = nothing; normalize = true)
feature_importance_by_count = MDT.variable_countmap(model)
if !isnothing(var_grouping)
feature_importance_by_count = Dict([
# i_var => var_grouping[i_modality][i_var]
var_grouping[i_modality][i_var] => count
for ((i_modality, i_var), count) in feature_importance_by_count])
end
if normalize
sumcount = sum(Vector{Float64}(collect(values(feature_importance_by_count))))
feature_importance_by_count = Dict([
feature => (count/sumcount)
for (feature, count) in feature_importance_by_count])
end
feature_importance_by_count
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2122 |
using MLJModelInterface
import Base: show
# Callable helper placed in the fit report: prints the fitted model (either
# the raw MDT model or its SoleModels translation), optionally mapping
# variable indices back to their names via `var_grouping`.
struct ModelPrinter{M<:MDT.SymbolicModel,SM<:SoleModels.AbstractModel}
    m::MLJModelInterface.Model  # the MLJ model (provides display_depth)
    rawmodel::M  # raw model, as returned by the core algorithm
    solemodel::SM  # symbolic (SoleModels) translation of the model
    var_grouping::Union{Nothing,AbstractVector{<:AbstractVector},AbstractVector{<:AbstractDict}}
end
# Without an explicit IO, print on stdout.
(c::ModelPrinter)(args...; kwargs...) = c(stdout, args...; kwargs...)
# Do not remove (generates compile-time warnings)
# IO-only entry point: print the sole model up to the model's configured
# display depth.
function (c::ModelPrinter)(io::IO; kwargs...)
    c(io, true, c.m.display_depth; kwargs...)
end
# Entry point with an explicit maximum printing depth.
function (c::ModelPrinter)(
    io::IO,
    max_depth::Union{Nothing,Integer};
    kwargs...
)
    # NOTE(review): `max_depth` is forwarded here as a *keyword* argument,
    # while the target method declares it as an optional *positional* argument
    # (so it ends up swept into that method's `kwargs...`); presumably
    # `c(io, true, max_depth; kwargs...)` was intended — TODO confirm.
    c(io, true, max_depth = max_depth; kwargs...)
end
# Print either the SoleModels translation (`print_solemodel = true`) or the
# raw model, up to `max_depth`.
function (c::ModelPrinter)(
    io::IO,
    print_solemodel::Bool,
    max_depth::Union{Nothing,Integer} = c.m.display_depth;
    kwargs...
)
    c(io, (print_solemodel ? c.solemodel : c.rawmodel); max_depth = max_depth, kwargs...)
end
# Core printing method: prints `model` on `io`; when both `X` and `y` are
# provided, the model is printed together with its application to the data.
# The keyword set forwarded to the printer depends on the model's type
# (raw MDT model vs. SoleModels model).
function (c::ModelPrinter)(
    io::IO,
    model,
    X = nothing,
    y = nothing;
    max_depth = c.m.display_depth,
    # Hide the modality marker when there is (at most) a single modality.
    hidemodality = (isnothing(c.var_grouping) || length(c.var_grouping) == 1),
    kwargs...
)
    more_kwargs = begin
        if model isa Union{MDT.DForest,MDT.DTree,MDT.DTNode}
            # Raw MDT models accept the variable-name map directly.
            (; variable_names_map = c.var_grouping, max_depth = max_depth)
        elseif model isa SoleModels.AbstractModel
            # SoleModels models take the map via `syntaxstring_kwargs`.
            (; max_depth = max_depth, syntaxstring_kwargs = (variable_names_map = c.var_grouping, hidemodality = hidemodality))
        else
            error("Unexpected model type $(model)")
        end
    end
    # if haskey(kwargs, :variable_names_map) && kwargs.variable_names_map is not multimodal then fix... variable_names_map
    if isnothing(X) && isnothing(y)
        MDT.printmodel(io, model; silent = true, more_kwargs..., kwargs...)
    elseif !isnothing(X) && !isnothing(y)
        # Reformat the user-provided data exactly as at fit time, then print
        # the model together with its predictions on it.
        (X, y, var_grouping, classes_seen) = MMI.reformat(c.m, X, y)
        MDT.printapply(io, model, X, y; silent = true, more_kwargs..., kwargs...)
    else
        error("ModelPrinter: Either provide X and y or don't!")
    end
end
Base.show(io::IO, c::ModelPrinter) = print(io, "ModelPrinter object")
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 906 |
# if model.check_conditions == true
# check_conditions(model.conditions)
# end
# function check_conditions(conditions)
# if isnothing(conditions)
# return
# end
# # Check that feature extraction functions are scalar
# wrong_conditions = filter((f)->begin
# !all(
# (ch)->!(f isa Base.Callable) ||
# (ret = f(ch); isa(ret, Real) && typeof(ret) == eltype(ch)),
# [collect(1:10), collect(1.:10.)]
# )
# end, conditions)
# @assert length(wrong_conditions) == 0 "When specifying feature extraction functions " *
# "for inferring `conditions`, please specify " *
# "scalar functions accepting an object of type `AbstractArray{T}` " *
# "and returning an object of type `T`, with `T<:Real`. " *
# "Instead, got wrong feature functions: $(wrong_conditions)."
# end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6175 |
using SoleData
using SoleData: AbstractModalLogiset, SupportedLogiset
using MultiData
using MultiData: dataframe2dimensional
# UNI
# AbstractArray -> scalarlogiset -> supportedlogiset
# SupportedLogiset -> supportedlogiset
# AbstractModalLogiset -> supportedlogiset
# MULTI
# SoleData.MultiDataset -> multilogiset
# AbstractDataFrame -> naturalgrouping -> multilogiset
# MultiLogiset -> multilogiset
"""
    wrapdataset(X, model, force_var_grouping = nothing; passive_mode = false)

Normalize any supported input dataset `X` (arrays, tables/`DataFrame`s,
(multi)datasets, logisets) into a `MultiLogiset`, together with the adopted
variable grouping (`nothing` when none applies). In `passive_mode`, logiset
precomputation is avoided when possible.
"""
function wrapdataset(
    X,
    model,
    force_var_grouping::Union{Nothing,AbstractVector{<:AbstractVector}} = nothing;
    passive_mode = false
)
    # Already a MultiLogiset: nothing to do (any forced grouping is ignored).
    if X isa MultiLogiset
        if !isnothing(force_var_grouping)
            @warn "Ignoring var_grouping $(force_var_grouping) (a MultiLogiset was provided)."
        end
        multimodal_X, var_grouping = X, nothing
        return multimodal_X, var_grouping
    end
    # Vector of instance values
    # Matrix instance x variable -> Matrix variable x instance
    if X isa AbstractVector
        X = collect(reshape(X, 1, length(X)))
    elseif X isa AbstractMatrix
        X = collect(X')
    end
    if X isa AbstractArray # Cube
        if !(X isa Union{AbstractVector,AbstractMatrix})
            @warn "AbstractArray of $(ndims(X)) dimensions and size $(size(X)) encountered. " *
                "This will be interpreted as a dataset of $(size(X)[end]) instances, " *
                "$(size(X)[end-1]) variables, and channel size $(size(X)[1:end-2])."
            # "datasets ($(typeof(X)) encountered)"
        end
        # Slice along the last dimension: one slice per instance.
        X = eachslice(X; dims=ndims(X))
    end
    # First normalization pass: dimensional data is downsized and (unless in
    # passive mode) turned into a logiset; tables become DataFrames.
    X = begin
        if X isa AbstractDimensionalDataset
            X = model.downsize.(eachinstance(X))
            if !passive_mode
                @info "Precomputing logiset..."
                metaconditions = readconditions(model, X)
                features = unique(SoleData.feature.(metaconditions))
                scalarlogiset(X, features;
                    use_onestep_memoization = true,
                    conditions = metaconditions,
                    relations = readrelations(model, X),
                    print_progress = (ninstances(X) > 500)
                )
            else
                MultiData.dimensional2dataframe(X)
            end
        elseif X isa SupportedLogiset
            X
        elseif X isa AbstractModalLogiset
            # Wrap a bare logiset with one-step memoization support.
            SupportedLogiset(X;
                use_onestep_memoization = true,
                conditions = readconditions(model, X),
                relations = readrelations(model, X)
            )
        elseif X isa AbstractMultiDataset
            X
        elseif Tables.istable(X)
            DataFrame(X)
        else
            X
        end
    end
    # @show X
    # @show collect.(X)
    # readline()
    # DataFrame -> MultiDataset + variable grouping (needed for printing)
    X, var_grouping = begin
        if X isa AbstractDataFrame
            # Only scalar and 0-/1-/2-dimensional real-valued columns are supported.
            allowedcoltypes = Union{Real,AbstractArray{<:Real,0},AbstractVector{<:Real},AbstractMatrix{<:Real}}
            wrong_columns = filter(((colname,c),)->!(eltype(c) <: allowedcoltypes), collect(zip(names(X), eachcol(X))))
            @assert length(wrong_columns) == 0 "Invalid columns " *
                "encountered: `$(join(first.(wrong_columns), "`, `", "` and `"))`. $(MDT).jl only allows " *
                "variables that are `Real` and `AbstractArray{<:Real,N}` with N ∈ {0,1,2}. " *
                "Got: `$(join(eltype.(last.(wrong_columns)), "`, `", "` and `"))`" * (length(wrong_columns) > 1 ? ", respectively" : "") * "."
            var_grouping = begin
                if isnothing(force_var_grouping)
                    # Group variables into modalities by their channel size.
                    var_grouping = SoleData.naturalgrouping(X; allow_variable_drop = true)
                    if !(length(var_grouping) == 1 && length(var_grouping[1]) == ncol(X))
                        @info "Using variable grouping:\n" *
                            # join(map(((i_mod,variables),)->"[$i_mod] -> [$(join(string.(variables), ", "))]", enumerate(var_grouping)), "\n")
                            join(map(((i_mod,variables),)->"\t{$i_mod} => $(Tuple(variables))", enumerate(var_grouping)), "\n")
                    end
                    var_grouping
                else
                    @assert force_var_grouping isa AbstractVector{<:AbstractVector} "$(typeof(force_var_grouping))"
                    force_var_grouping
                end
            end
            md = MultiDataset(X, var_grouping)
            # Downsize
            md = MultiDataset([begin
                mod, varnames = dataframe2dimensional(mod)
                mod = model.downsize.(eachinstance(mod))
                SoleData.dimensional2dataframe(mod, varnames)
            end for mod in eachmodality(md)])
            md, var_grouping
        else
            X, nothing
        end
    end
    # println(X)
    # println(modality(X, 1))
    # Final pass: ensure a MultiLogiset (precomputing per-modality logisets
    # unless passive mode applies and X can already seed a multilogiset).
    multimodal_X = begin
        if X isa SoleData.AbstractMultiDataset
            if !passive_mode || !SoleData.ismultilogiseed(X)
                @info "Precomputing logiset..."
                MultiLogiset([begin
                    _metaconditions = readconditions(model, mod)
                    features = unique(SoleData.feature.(_metaconditions))
                    # @show _metaconditions
                    # @show features
                    scalarlogiset(mod, features;
                        use_onestep_memoization = true,
                        conditions = _metaconditions,
                        relations = readrelations(model, mod),
                        print_progress = (ninstances(X) > 500)
                    )
                end for mod in eachmodality(X)
                ])
            else
                X
            end
        elseif X isa AbstractModalLogiset
            MultiLogiset(X)
        elseif X isa MultiLogiset
            X
        else
            error("Unexpected dataset type: $(typeof(X)). Allowed dataset types are " *
                "AbstractArray, AbstractDataFrame, " *
                "SoleData.AbstractMultiDataset and SoleData.AbstractModalLogiset.")
        end
    end
    return (multimodal_X, var_grouping)
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 8266 | using SoleData: ExistentialTopFormula
# function translate(
# node::DTInternal{L,D},
# initconditions,
# all_ancestors::Vector{<:DTInternal} = DTInternal[],
# all_ancestor_formulas::Vector = [],
# pos_ancestors::Vector{<:DTInternal} = DTInternal[],
# info = (;),
# shortform::Union{Nothing,MultiFormula} = nothing,
# ) where {L,D<:DoubleEdgedDecision}
# forthnode = forth(node)
# subtree_nodes = []
# cur_node = node
# while cur_node != forthnode
# push!(subtree_nodes, cur_node)
# @assert isinleftsubtree(forthnode, cur_node) || isinrightsubtree(forthnode, cur_node) "Translation error! Illegal case detected."
# cur_node = isinleftsubtree(forthnode, cur_node) ? left(cur_node) : right(cur_node)
# end
# @show length(subtree_nodes)
# @show displaydecision.(decision.(subtree_nodes))
# println(displaydecision.(decision.(subtree_nodes)))
# push!(subtree_nodes, forthnode)
# new_all_ancestors = DTInternal{L,<:DoubleEdgedDecision}[all_ancestors..., subtree_nodes...]
# new_pos_ancestors = DTInternal{L,<:DoubleEdgedDecision}[pos_ancestors..., subtree_nodes...]
# for (i, (νi, νj)) in enumerate(zip(new_all_ancestors[2:end], new_all_ancestors[1:end-1]))
# if !(isleftchild(νi, νj) || isrightchild(νi, νj))
# error("ERROR")
# @show νi
# @show νj
# end
# end
# φl = pathformula(new_all_ancestors, left(forthnode), false)
# φr = SoleLogics.normalize(¬(φl); allow_atom_flipping=true, prefer_implications = true)
# new_all_ancestor_formulas = [all_ancestor_formulas..., φl]
# # @show φl, φr
# # φr = pathformula(new_pos_ancestors, right(node), true)
# # @show syntaxstring(φl)
# pos_shortform, neg_shortform = begin
# if length(all_ancestors) == 0
# (
# φl,
# φr,
# )
# else
# my_conjuncts = [begin
# (isinleftsubtree(node, anc) ? φ : SoleLogics.normalize(¬(φ); allow_atom_flipping=true, prefer_implications = true))
# end for (φ, anc) in zip(all_ancestor_formulas, all_ancestors)]
# my_left_conjuncts = [my_conjuncts..., φl]
# my_right_conjuncts = [my_conjuncts..., φr]
# ∧(my_left_conjuncts...), ∧(my_right_conjuncts...)
# end
# end
# info = merge(info, (;
# this = translate(ModalDecisionTrees.this(node), initconditions, new_all_ancestors, all_ancestor_formulas, new_pos_ancestors, (;), shortform),
# supporting_labels = ModalDecisionTrees.supp_labels(node),
# ))
# if !isnothing(shortform)
# # @show syntaxstring(shortform)
# info = merge(info, (;
# shortform = build_antecedent(shortform, initconditions),
# ))
# end
# SoleModels.Branch(
# build_antecedent(φl, initconditions),
# translate(left(forthnode), initconditions, new_all_ancestors, new_all_ancestor_formulas, new_pos_ancestors, (;), pos_shortform),
# translate(right(forthnode), initconditions, new_all_ancestors, new_all_ancestor_formulas, pos_ancestors, (;), neg_shortform),
# info
# )
# end
# isback(backnode::DTInternal, back::DTInternal) = (backnode == back(node))
# Whether a formula is "implicative": its top-level connective is an
# implication, or it is a unary box modality applied directly to an implication.
function isimplicative(f::Formula)
    t = tree(f)
    isimpl = (token(t) == (→)) ||
        (SoleLogics.isbox(token(t)) && SoleLogics.isunary(token(t)) &&
            # Fix: compare the child's *token* to (→). The child itself is a
            # SyntaxTree, so `first(children(t)) == (→)` could never hold for a
            # box over a (non-leaf) implication subtree.
            token(first(children(t))) == (→))
    println(syntaxstring(f), "\t", isimpl)  # NOTE(review): debug output, kept as-is
    return isimpl
end
"""
    pathformula(ancestors, node, multimodal, args...; kwargs...)

Build the logical formula for the path `ancestors` → `node` in a tree with
`DoubleEdgedDecision`s, by delegating to `_pathformula_complete` on the full
path (ancestors plus `node`). Only the formula is returned; the accompanying
"is implicative" flag is dropped.
"""
function pathformula(
    ancestors::Vector{<:DTInternal{L,<:DoubleEdgedDecision}},
    node::DTNode{LL},
    multimodal::Bool,
    args...;
    kwargs...
) where {L,LL}
    φ, isimpl = _pathformula_complete(DTNode{Union{L,LL},<:DoubleEdgedDecision}[ancestors..., node], multimodal, args...; kwargs...)
    # NOTE(review): debug printing left in place.
    println(syntaxstring(φ))
    println()
    println()
    println()
    return φ
end
# TODO @memoize
"""
    _pathformula_complete(path, multimodal) -> (formula, isimplicative)

Recursively build the formula for a root-to-leaf `path` of nodes carrying
`DoubleEdgedDecision`s, following the "complete" translation semantics.
Returns the formula together with a flag telling whether its outermost shape
is implicative (needed by the caller to choose ∧ vs → when composing).
When `!multimodal`, the path is first restricted to the modality of its
second-to-last node.
"""
function _pathformula_complete(
    path::Vector{<:DTNode{L,<:DoubleEdgedDecision}},
    multimodal::Bool,
    # dontincrease::Bool = true,
    # addlast = true,
    # perform_checks = true,
) where {L}
    if !multimodal
        println([ν isa DTLeaf ? ModalDecisionTrees.prediction(ν) : displaydecision(decision(ν)) for ν in path])
        path = filter(ν->((ν isa DTLeaf) || i_modality(ν) == i_modality(last(path[1:end-1]))), path)
        # @assert length(path) > 0
    end
    # h is the path "height" (number of decisions along it).
    h = length(path)-1
    # println([displaydecision.(decision.(path[1:end-1]))..., ModalDecisionTrees.prediction(path[end])])
    println([ν isa DTLeaf ? ModalDecisionTrees.prediction(ν) : displaydecision(decision(ν)) for ν in path])
    @show h
    if h == 0
        return (SoleLogics.⊤, false)
        # return error("pathformula cannot accept path of height 0.")
    elseif h == 1
        ν0, ν1 = path
        return (MultiFormula(i_modality(ν0), get_lambda(ν0, ν1)), false)
    else
        ν0, ν1 = path[1], path[2]
        _lambda = get_lambda(ν0, ν1)
        @show syntaxstring(_lambda)
        # Split the path at the "contributor" ctr: the first node whose back
        # edge points at ν1 (defaulting to ν1 itself).
        path1, path2, ctr, ctr_child = begin
            # # if perform_checks
            # contributors = filter(ν->back(ν) == ν1, path)
            # @assert length(contributors) <= 1
            # return length(contributors) == 1 ? first(contributors) : ν1
            i_ctr, ctr = begin
                i_ctr, ctr = nothing, nothing
                i_ctr, ctr = 2, path[2]
                for (i_node, ν) in enumerate(path[1:end-1])
                    if back(ν) == ν1
                        i_ctr, ctr = i_node, ν
                        break
                    end
                end
                i_ctr, ctr
            end
            path1, path2 = path[2:i_ctr], path[i_ctr:end]
            @show i_ctr
            ctr_child = path[i_ctr+1]
            path1, path2, ctr, ctr_child
        end
        # agreement: ν1 and ctr_child sit on the same (left/right) side of
        # their respective parents.
        agreement = !xor(isleftchild(ν1, ν0), isleftchild(ctr_child, ctr))
        f1, _ = _pathformula_complete(path1, true)
        f2, f2_isimpl = _pathformula_complete(path2, true)
        # DEBUG:
        # λ = MultiFormula(i_modality(ν0), _lambda)
        # f1 = f1 == ⊤ ? MultiFormula(1, f1) : f1
        # f2 = f2 == ⊤ ? MultiFormula(1, f2) : f2
        # @show syntaxstring(λ)
        # @show syntaxstring(f1)
        # @show syntaxstring(f2)
        # END DEBUG
        # f2_isimpl = isimplicative(f2)
        ded = decision(ν0)
        isprop = is_propositional_decision(ded)
        # Compose λ, f1, f2 conjunctively or implicatively (⊤ operands are
        # elided), wrapping in ◊/□ when the decision is modal.
        if isprop
            λ = MultiFormula(i_modality(ν0), _lambda)
            if !xor(agreement, !f2_isimpl)
                # return (λ ∧ (f1 ∧ f2), false)
                if f1 == ⊤ && f2 != ⊤ return (λ ∧ f2, false)
                elseif f1 != ⊤ && f2 == ⊤ return (λ ∧ f1, false)
                elseif f1 != ⊤ && f2 != ⊤ return (λ ∧ (f1 ∧ f2), false)
                else return (λ, false)
                end
            else
                # return (λ → (f1 → f2), true)
                if f1 == ⊤ && f2 != ⊤ return (λ → f2, true)
                elseif f1 != ⊤ && f2 == ⊤ return (λ → f1, true)
                elseif f1 != ⊤ && f2 != ⊤ return (λ → (f1 → f2), true)
                else return (λ, true)
                end
            end
        else
            rel = relation(formula(ded))
            if !xor(agreement, !f2_isimpl)
                ◊ = SoleLogics.diamond(rel)
                # return (◊(f1 ∧ f2), false)
                if f1 == ⊤ && f2 != ⊤ return (◊(f2), false)
                elseif f1 != ⊤ && f2 == ⊤ return (◊(f1), false)
                elseif f1 != ⊤ && f2 != ⊤ return (◊(f1 ∧ f2), false)
                else return (◊(⊤), false)
                end
            else
                □ = SoleLogics.box(rel)
                # return (□(f1 → f2), true)
                if f1 == ⊤ && f2 != ⊤ return (□(f2), true)
                elseif f1 != ⊤ && f2 == ⊤ return (□(f1), true)
                elseif f1 != ⊤ && f2 != ⊤ return (□(f1 → f2), true)
                else return (⊤, true)
                end
            end
        end
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 19988 | using Revise
using SoleLogics
using SoleModels
using SoleModels: info
using SoleData
using SoleData: ScalarCondition, ScalarMetaCondition
using SoleData: AbstractFeature
using SoleData: relation, feature, test_operator, threshold, inverse_test_operator
using ModalDecisionTrees: DTInternal, DTNode, DTLeaf, NSDTLeaf
using ModalDecisionTrees: isleftchild, isrightchild, isinleftsubtree, isinrightsubtree
using ModalDecisionTrees: left, right
using FunctionWrappers: FunctionWrapper
using Memoization
############################################################################################
# MDTv1 translation
############################################################################################
# Anchor each per-modality subformula of `a` to its modality's initial
# condition, yielding the final antecedent formula.
function build_antecedent(a::MultiFormula, initconditions)
    anchored = Dict([i_mod => anchor(f, initconditions[i_mod]) for (i_mod, f) in modforms(a)])
    return MultiFormula(anchored)
end
# Fallback: models already in SoleModels form need no translation.
translate(model::SoleModels.AbstractModel; kwargs...) = model
# TODO remove
# Keyword-style entry point: forwards `info` as a positional argument.
translate(model::Union{DTree,DForest}; info = (;), kwargs...) = translate(model, info; kwargs...)
# Translate every tree of a forest and package the results, attaching the
# forest's metrics to the info named tuple.
function translate(
    forest::DForest,
    info = (;);
    kwargs...
)
    translated_trees = map(t -> translate(t; kwargs...), trees(forest))
    forest_info = merge(info, (; metrics = metrics(forest)))
    return SoleModels.DecisionForest(translated_trees, forest_info)
end
# Translate a modal decision tree: translate its root (threading the tree's
# initial conditions through) and lift the root's info onto the result.
function translate(
    tree::DTree,
    info = (;);
    kwargs...
)
    pure_root = translate(ModalDecisionTrees.root(tree), ModalDecisionTrees.initconditions(tree); kwargs...)
    info = merge(info, SoleModels.info(pure_root))
    # Removed dead code: `info = merge(info, (;))` was a no-op (merging an
    # empty NamedTuple leaves `info` unchanged).
    return SoleModels.DecisionTree(pure_root, info)
end
# Translate a plain decision leaf into a constant model, carrying along its
# supporting labels/predictions and (when available) its shortform antecedent.
function translate(
    tree::DTLeaf,
    initconditions,
    args...;
    info = (;),
    shortform = nothing,
    optimize_shortforms = nothing,
)
    leafinfo = merge(info, (;
        supporting_labels = ModalDecisionTrees.supp_labels(tree),
        supporting_predictions = ModalDecisionTrees.predictions(tree),
    ))
    if shortform !== nothing
        leafinfo = merge(leafinfo, (; shortform = build_antecedent(shortform, initconditions)))
    end
    return SoleModels.ConstantModel(ModalDecisionTrees.prediction(tree), leafinfo)
end
# Translate a functional (NSDT) leaf into a function model, carrying along its
# supporting labels/predictions and (when available) its shortform antecedent.
function translate(
    tree::NSDTLeaf,
    initconditions,
    args...;
    info = (;),
    shortform = nothing,
    optimize_shortforms = nothing,
)
    leafinfo = merge(info, (;
        supporting_labels = ModalDecisionTrees.supp_labels(tree),
        supporting_predictions = ModalDecisionTrees.predictions(tree),
    ))
    if shortform !== nothing
        leafinfo = merge(leafinfo, (; shortform = build_antecedent(shortform, initconditions)))
    end
    return SoleModels.FunctionModel(ModalDecisionTrees.predicting_function(tree), leafinfo)
end
############################################################################################
############################################################################################
############################################################################################
"""
    translate(node::DTInternal, initconditions, path, pos_path, ancestors, ancestor_formulas; ...)

Translate an internal tree node into a `SoleModels.Branch`. For
`RestrictedDecision`s the node itself is the decision point; for
`DoubleEdgedDecision`s the chain of nodes up to the node's `forth` pointer is
first collected. The left-branch formula φl is built via `pathformula`, the
right one as its normalized negation; per-branch "shortform" antecedents are
accumulated from the ancestor formulas (optionally pruned per modality when
`optimize_shortforms`).
"""
function translate(
    node::DTInternal{L,D},
    initconditions,
    path::Vector{<:DTInternal} = DTInternal[],
    pos_path::Vector{<:DTInternal} = DTInternal[],
    ancestors::Vector{<:DTInternal} = DTInternal[],
    ancestor_formulas::Vector = [];
    info = (;),
    shortform::Union{Nothing,MultiFormula} = nothing,
    optimize_shortforms::Bool = true
) where {L,D<:AbstractDecision}
    if D<:RestrictedDecision
        forthnode = node
        new_ancestors = DTInternal{L,<:RestrictedDecision}[ancestors..., forthnode]
        new_path = DTInternal{L,<:RestrictedDecision}[path..., forthnode]
        new_pos_path = DTInternal{L,<:RestrictedDecision}[pos_path..., forthnode]
        φl = pathformula(new_pos_path, left(forthnode), false)
    elseif D<:DoubleEdgedDecision
        # Walk from `node` down to its forth-node, recording the traversed chain.
        forthnode = forth(node)
        subtree_nodes = []
        cur_node = node
        while cur_node != forthnode
            push!(subtree_nodes, cur_node)
            @assert isinleftsubtree(forthnode, cur_node) || isinrightsubtree(forthnode, cur_node) "Translation error! Illegal case detected."
            cur_node = isinleftsubtree(forthnode, cur_node) ? left(cur_node) : right(cur_node)
        end
        # @show length(subtree_nodes)
        # @show displaydecision.(decision.(subtree_nodes))
        # println(displaydecision.(decision.(subtree_nodes)))
        push!(subtree_nodes, forthnode)
        new_ancestors = DTInternal{L,<:DoubleEdgedDecision}[ancestors..., forthnode]
        new_path = DTInternal{L,<:DoubleEdgedDecision}[path..., subtree_nodes...]
        new_pos_path = DTInternal{L,<:DoubleEdgedDecision}[pos_path..., subtree_nodes...]
        # DEBUG
        # for (i, (νi, νj)) in enumerate(zip(new_path[2:end], new_path[1:end-1]))
        #     if !(isleftchild(νi, νj) || isrightchild(νi, νj))
        #         error("ERROR")
        #         @show νi
        #         @show νj
        #     end
        # end
        φl = pathformula(new_path, left(forthnode), false)
    else
        error("Unexpected decision type: $(D)")
    end
    # Right branch: normalized negation of the left-branch formula.
    φr = SoleLogics.normalize(¬(φl); allow_atom_flipping=true, prefer_implications = true)
    new_ancestor_formulas = [ancestor_formulas..., φl]
    # φr = pathformula(new_pos_path, right(forthnode), true)
    # @show syntaxstring(φl)
    pos_shortform, neg_shortform = begin
        if length(path) == 0
            (
                φl,
                φr,
            )
        else
            # my_conjuncts = [begin
            #     # anc_prefix = new_path[1:nprefix]
            #     # cur_node = new_path[nprefix+1]
            #     anc_prefix = new_path[1:(nprefix+1)]
            #     new_pos_path = similar(anc_prefix, 0)
            #     for i in 1:(length(anc_prefix)-1)
            #         if isinleftsubtree(anc_prefix[i+1], anc_prefix[i])
            #             push!(new_pos_path, anc_prefix[i])
            #         end
            #     end
            #     φ = pathformula(new_pos_path, anc_prefix[end], false)
            #     (isinleftsubtree(node, anc_prefix[end]) ? φ : ¬φ)
            # end for nprefix in 1:(length(new_path)-1)]
            # @assert length(ancestor_formulas) == length(ancestors)
            my_conjuncts = [begin
                (isinleftsubtree(node, anc) ? φ : SoleLogics.normalize(¬(φ); allow_atom_flipping=true, prefer_implications = true))
            end for (φ, anc) in zip(ancestor_formulas, ancestors)]
            my_left_conjuncts = [my_conjuncts..., φl]
            my_right_conjuncts = [my_conjuncts..., φr]
            # println()
            # println()
            # println()
            # @show syntaxstring.(my_left_conjuncts)
            # @show syntaxstring.(my_right_conjuncts)
            # @show syntaxstring(∧(my_left_conjuncts...))
            # @show syntaxstring(∧(my_right_conjuncts...))
            if optimize_shortforms
                # if D <: DoubleEdgedDecision
                #     error("optimize_shortforms is untested with DoubleEdgedDecision's.")
                # end
                # Remove nonmaximal positives (for each modality)
                modalities = unique(i_modality.(new_ancestors))
                my_filtered_left_conjuncts = similar(my_left_conjuncts, 0)
                my_filtered_right_conjuncts = similar(my_right_conjuncts, 0)
                for i_mod in modalities
                    this_mod_mask = map((anc)->i_modality(anc) == i_mod, new_ancestors)
                    # @show this_mod_mask
                    this_mod_ancestors = new_ancestors[this_mod_mask]
                    # @show syntaxstring.(formula.(decision.(this_mod_ancestors)))
                    ispos_ancestors = [isinleftsubtree(ν2, ν1) for (ν2, ν1) in zip(this_mod_ancestors[2:end], this_mod_ancestors[1:end-1])]
                    # @show ispos_ancestors
                    begin
                        this_mod_conjuncts = my_left_conjuncts[this_mod_mask]
                        # ispos = map(anc->isinleftsubtree(left(forthnode), anc), this_mod_ancestors)
                        ispos = [ispos_ancestors..., true]
                        lastpos = findlast(x->x == true, ispos)
                        # @show i_mod, ispos
                        if !isnothing(lastpos)
                            this_mod_conjuncts = [this_mod_conjuncts[lastpos], this_mod_conjuncts[(!).(ispos)]...]
                        end
                        # @show this_mod_conjuncts
                        append!(my_filtered_left_conjuncts, this_mod_conjuncts)
                    end
                    begin
                        this_mod_conjuncts = my_right_conjuncts[this_mod_mask]
                        # ispos = map(anc->isinleftsubtree(right(forthnode), anc), this_mod_ancestors)
                        ispos = [ispos_ancestors..., false]
                        lastpos = findlast(x->x == true, ispos)
                        # @show i_mod, ispos
                        if !isnothing(lastpos)
                            this_mod_conjuncts = [this_mod_conjuncts[lastpos], this_mod_conjuncts[(!).(ispos)]...]
                        end
                        # @show this_mod_conjuncts
                        append!(my_filtered_right_conjuncts, this_mod_conjuncts)
                    end
                end
                # @show syntaxstring(∧(my_filtered_left_conjuncts...))
                # @show syntaxstring(∧(my_filtered_right_conjuncts...))
                ∧(my_filtered_left_conjuncts...), ∧(my_filtered_right_conjuncts...)
            else
                ∧(my_left_conjuncts...), ∧(my_right_conjuncts...)
            end
        end
    end
    # pos_conj = pathformula(new_pos_path[1:end-1], new_pos_path[end], false)
    # @show pos_conj
    # @show syntaxstring(pos_shortform)
    # @show syntaxstring(neg_shortform)
    # # shortforms for my children
    # pos_shortform, neg_shortform = begin
    #     if isnothing(shortform)
    #         φl, φr
    #     else
    #         dl, dr = Dict{Int64,SoleLogics.SyntaxTree}(deepcopy(modforms(shortform))), Dict{Int64,SoleLogics.SyntaxTree}(deepcopy(modforms(shortform)))
    #         dl[i_modality(node)] = modforms(φl)[i_modality(node)]
    #         dr[i_modality(node)] = modforms(φr)[i_modality(node)]
    #         MultiFormula(dl), MultiFormula(dr)
    #     end
    # end
    forthnode_as_a_leaf = ModalDecisionTrees.this(forthnode)
    this_as_a_leaf = translate(forthnode_as_a_leaf, initconditions, new_path, new_pos_path, ancestors, ancestor_formulas; shortform = shortform, optimize_shortforms = optimize_shortforms)
    info = merge(info, (;
        this = this_as_a_leaf,
        # supporting_labels = SoleModels.info(this_as_a_leaf, :supporting_labels),
        supporting_labels = ModalDecisionTrees.supp_labels(forthnode_as_a_leaf),
        # supporting_predictions = SoleModels.info(this_as_a_leaf, :supporting_predictions),
        supporting_predictions = ModalDecisionTrees.predictions(forthnode_as_a_leaf),
    ))
    if !isnothing(shortform)
        # @show syntaxstring(shortform)
        info = merge(info, (;
            shortform = build_antecedent(shortform, initconditions),
        ))
    end
    SoleModels.Branch(
        build_antecedent(φl, initconditions),
        translate(left(forthnode), initconditions, new_path, new_pos_path, new_ancestors, new_ancestor_formulas; shortform = pos_shortform, optimize_shortforms = optimize_shortforms),
        translate(right(forthnode), initconditions, new_path, pos_path, new_ancestors, new_ancestor_formulas; shortform = neg_shortform, optimize_shortforms = optimize_shortforms),
        info
    )
end
# function translate(
# node::DTInternal{L,D},
# initconditions,
# all_ancestors::Vector{<:DTInternal} = DTInternal[],
# all_ancestor_formulas::Vector = [],
# pos_ancestors::Vector{<:DTInternal} = DTInternal[];
# info = (;),
# shortform::Union{Nothing,MultiFormula} = nothing,
# optimize_shortforms = false,
# ) where {L,D<:RestrictedDecision}
# new_all_ancestors = DTInternal{L,<:RestrictedDecision}[all_ancestors..., node]
# new_pos_ancestors = DTInternal{L,<:RestrictedDecision}[pos_ancestors..., node]
# φl = pathformula(new_pos_ancestors, left(node), false)
# φr = SoleLogics.normalize(¬(φl); allow_atom_flipping=true, prefer_implications = true)
# new_all_ancestor_formulas = [all_ancestor_formulas..., φl]
# # @show φl, φr
# # φr = pathformula(new_pos_ancestors, right(node), true)
# # @show syntaxstring(φl)
# pos_shortform, neg_shortform = begin
# if length(all_ancestors) == 0
# (
# φl,
# φr,
# )
# else
# # my_conjuncts = [begin
# # # anc_prefix = new_all_ancestors[1:nprefix]
# # # cur_node = new_all_ancestors[nprefix+1]
# # anc_prefix = new_all_ancestors[1:(nprefix+1)]
# # new_pos_all_ancestors = similar(anc_prefix, 0)
# # for i in 1:(length(anc_prefix)-1)
# # if isinleftsubtree(anc_prefix[i+1], anc_prefix[i])
# # push!(new_pos_all_ancestors, anc_prefix[i])
# # end
# # end
# # φ = pathformula(new_pos_all_ancestors, anc_prefix[end], false)
# # (isinleftsubtree(node, anc_prefix[end]) ? φ : ¬φ)
# # end for nprefix in 1:(length(new_all_ancestors)-1)]
# my_conjuncts = [begin
# (isinleftsubtree(node, anc) ? φ : SoleLogics.normalize(¬(φ); allow_atom_flipping=true, prefer_implications = true))
# end for (φ, anc) in zip(all_ancestor_formulas, all_ancestors)]
# my_left_conjuncts = [my_conjuncts..., φl]
# my_right_conjuncts = [my_conjuncts..., φr]
# # Remove nonmaximal positives (for each modality)
# modalities = unique(i_modality.(new_all_ancestors))
# my_filtered_left_conjuncts = similar(my_left_conjuncts, 0)
# my_filtered_right_conjuncts = similar(my_right_conjuncts, 0)
# for i_mod in modalities
# this_mod_mask = map((anc)->i_modality(anc) == i_mod, new_all_ancestors)
# this_mod_ancestors = new_all_ancestors[this_mod_mask]
# begin
# this_mod_conjuncts = my_left_conjuncts[this_mod_mask]
# ispos = map(anc->isinleftsubtree(left(node), anc), this_mod_ancestors)
# lastpos = findlast(x->x, ispos)
# # @show i_mod, ispos
# if !isnothing(lastpos)
# this_mod_conjuncts = [this_mod_conjuncts[lastpos], this_mod_conjuncts[(!).(ispos)]...]
# end
# append!(my_filtered_left_conjuncts, this_mod_conjuncts)
# end
# begin
# this_mod_conjuncts = my_right_conjuncts[this_mod_mask]
# ispos = map(anc->isinleftsubtree(right(node), anc), this_mod_ancestors)
# lastpos = findlast(x->x, ispos)
# # @show i_mod, ispos
# if !isnothing(lastpos)
# this_mod_conjuncts = [this_mod_conjuncts[lastpos], this_mod_conjuncts[(!).(ispos)]...]
# end
# append!(my_filtered_right_conjuncts, this_mod_conjuncts)
# end
# end
# ∧(my_filtered_left_conjuncts...), ∧(my_filtered_right_conjuncts...)
# end
# end
# # pos_conj = pathformula(new_pos_ancestors[1:end-1], new_pos_ancestors[end], false)
# # @show pos_conj
# # @show syntaxstring(pos_shortform)
# # @show syntaxstring(neg_shortform)
# # # shortforms for my children
# # pos_shortform, neg_shortform = begin
# # if isnothing(shortform)
# # φl, φr
# # else
# # dl, dr = Dict{Int64,SoleLogics.SyntaxTree}(deepcopy(modforms(shortform))), Dict{Int64,SoleLogics.SyntaxTree}(deepcopy(modforms(shortform)))
# # dl[i_modality(node)] = modforms(φl)[i_modality(node)]
# # dr[i_modality(node)] = modforms(φr)[i_modality(node)]
# # MultiFormula(dl), MultiFormula(dr)
# # end
# # end
# info = merge(info, (;
# this = translate(ModalDecisionTrees.this(node), initconditions, new_all_ancestors, all_ancestor_formulas, new_pos_ancestors; shortform = shortform, optimize_shortforms = optimize_shortforms),
# supporting_labels = ModalDecisionTrees.supp_labels(node),
# ))
# if !isnothing(shortform)
# # @show syntaxstring(shortform)
# info = merge(info, (;
# shortform = build_antecedent(shortform, initconditions),
# ))
# end
# SoleModels.Branch(
# build_antecedent(φl, initconditions),
# translate(left(node), initconditions, new_all_ancestors, new_all_ancestor_formulas, new_pos_ancestors; shortform = pos_shortform, optimize_shortforms = optimize_shortforms),
# translate(right(node), initconditions, new_all_ancestors, new_all_ancestor_formulas, pos_ancestors; shortform = neg_shortform, optimize_shortforms = optimize_shortforms),
# info
# )
# end
############################################################################################
############################################################################################
############################################################################################
# Package (feature, test operator, threshold) into a `ScalarCondition`.
function _condition(feature::AbstractFeature, test_op, threshold::T) where {T}
    return ScalarCondition(ScalarMetaCondition(feature, test_op), threshold)
end
# Atom for the condition as stated.
function _atom(φ::ScalarCondition)
    return Atom(_condition(feature(φ), test_operator(φ), threshold(φ)))
end
# Atom for the complement of the condition, via the inverse test operator.
function _atom_inv(φ::ScalarCondition)
    inv_op = inverse_test_operator(test_operator(φ))
    return Atom(_condition(feature(φ), inv_op, threshold(φ)))
end
# String propositions are wrapped as atoms verbatim.
_atom(p::String) = Atom(p)
# Complement of a string proposition: logical negation of its atom
# (no string-level "¬"-prefix folding is performed).
_atom_inv(p::String) = ¬(Atom(p))
# Accessors extracting, from a decision's formula, the propositional atom
# (or its negation) and the relational diamond/box modal operators.
get_atom(φ::Atom) = φ
get_atom_inv(φ::Atom) = ¬(φ)
# Existential-top formulas carry no proposition: they reduce to ⊤/⊥.
get_atom(φ::ExistentialTopFormula) = ⊤
get_atom_inv(φ::ExistentialTopFormula) = ⊥
get_diamond_op(φ::ExistentialTopFormula) = DiamondRelationalConnective(relation(φ))
get_box_op(φ::ExistentialTopFormula) = BoxRelationalConnective(relation(φ))
get_atom(φ::ScalarExistentialFormula) = _atom(φ.p)
get_atom_inv(φ::ScalarExistentialFormula) = _atom_inv(φ.p)
get_diamond_op(φ::ScalarExistentialFormula) = DiamondRelationalConnective(relation(φ))
get_box_op(φ::ScalarExistentialFormula) = BoxRelationalConnective(relation(φ))
# function is_propositional(node::DTNode)
# f = formula(ModalDecisionTrees.decision(node))
# isprop = (relation(f) == identityrel)
# return isprop
# end
"""
    get_lambda(parent::DTNode, child::DTNode)

Formula labeling the edge from `parent` to `child`: the parent's decision atom
for a left descent, its negation for a right descent; wrapped in the decision's
diamond (left) or box (right) operator when the decision is modal. Errors when
`child` lies in neither subtree of `parent`.
"""
function get_lambda(parent::DTNode, child::DTNode)
    d = ModalDecisionTrees.decision(parent)
    f = formula(d)
    # isprop = (relation(f) == identityrel)
    isprop = is_propositional_decision(d)
    if isinleftsubtree(child, parent)
        p = get_atom(f)
        if isprop
            return SyntaxTree(p)
        else
            diamond_op = get_diamond_op(f)
            return diamond_op(p)
        end
    elseif isinrightsubtree(child, parent)
        p_inv = get_atom_inv(f)
        if isprop
            return SyntaxTree(p_inv)
        else
            box_op = get_box_op(f)
            return box_op(p_inv)
        end
    else
        error("Cannot compute pathformula on malformed path: $((child, parent)).")
    end
end
include("complete.jl")
include("restricted.jl")
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2844 |
# Compute path formula using semantics from TODO cite
"""
    pathformula(pos_ancestors, node, multimodal[, dontincrease[, addlast]])

Recursively build the formula for the path of positive (left-descending)
ancestors ending at `node`, for trees with restricted scalar-existential
decisions. Left descents contribute diamond/∧ shapes, right descents box/→
shapes. When `!multimodal`, ancestors are restricted to the last ancestor's
modality. Memoized across calls with identical arguments.
"""
@memoize function pathformula(
    pos_ancestors::Vector{<:DTInternal{L,<:RestrictedDecision{<:ScalarExistentialFormula}}},
    node::DTNode{LL},
    multimodal::Bool,
    dontincrease::Bool = true,
    addlast = true,
) where {L,LL}
    if length(pos_ancestors) == 0
        # return error("pathformula cannot accept 0 pos_ancestors. node = $(node).")
        # @show get_lambda(node, left(node))
        return MultiFormula(i_modality(node), get_lambda(node, left(node)))
    else
        # Compute single-modality formula to check.
        if !multimodal
            pos_ancestors = filter(a->i_modality(a) == i_modality(last(pos_ancestors)), pos_ancestors)
            # @assert length(pos_ancestors) > 0
        end
        if length(pos_ancestors) == 1
            # @show prediction(this(node))
            anc = first(pos_ancestors)
            # @show get_lambda(anc, node)
            return MultiFormula(i_modality(anc), get_lambda(anc, node))
        else
            nodes = begin
                if addlast
                    [pos_ancestors..., node]
                else
                    pos_ancestors
                end
            end
            f = formula(ModalDecisionTrees.decision(nodes[1]))
            p = MultiFormula(i_modality(nodes[1]), SyntaxTree(get_atom(f)))
            isprop = is_propositional_decision(decision(nodes[1]))
            _dontincrease = isprop
            # Recurse on the tail of the path.
            φ = pathformula(Vector{DTInternal{Union{L,LL},<:RestrictedDecision{<:ScalarExistentialFormula}}}(nodes[2:(end-1)]), nodes[end], multimodal, _dontincrease, addlast)
            # @assert length(unique(anc_mods)) == 1 "At the moment, translate does not work " *
            #     "for MultiFormula formulas $(unique(anc_mods))."
            # @show addlast
            if (addlast && isinleftsubtree(nodes[end], nodes[end-1])) || (!addlast && isinleftsubtree(node, nodes[end])) # Remember: don't use isleftchild, because it fails in the multimodal case.
                # @show "DIAMOND"
                if isprop
                    return dontincrease ? φ : (p ∧ φ)
                else
                    ◊ = get_diamond_op(f)
                    return ◊(p ∧ φ)
                end
            elseif (addlast && isinrightsubtree(nodes[end], nodes[end-1])) || (!addlast && isinrightsubtree(node, nodes[end])) # Remember: don't use isrightchild, because it fails in the multimodal case.
                # @show "BOX"
                if isprop
                    return dontincrease ? φ : (p → φ)
                else
                    □ = get_box_op(f)
                    return □(p → φ)
                end
            else
                error("Cannot compute pathformula on malformed path: $((nodes[end], nodes[end-1])).")
            end
        end
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 73064 | export TestOperator,
canonical_geq, canonical_leq,
CanonicalFeatureGeqSoft, CanonicalFeatureLeqSoft
# Root of the test-operator hierarchy (⪴/⪳ and their softened variants).
abstract type TestOperator end
################################################################################
################################################################################
# Positive operators aggregate across worlds via `max` (neutral element
# `typemin`); negative ones via `min` (neutral element `typemax`).
abstract type TestOperatorPositive <: TestOperator end
abstract type TestOperatorNegative <: TestOperator end
polarity(::TestOperatorPositive) = true
polarity(::TestOperatorNegative) = false
# Neutral element of the operator's aggregation, for element type `T`.
@inline bottom(::TestOperatorPositive, T::Type) = typemin(T)
@inline bottom(::TestOperatorNegative, T::Type) = typemax(T)
# Aggregation function used when folding thresholds across worlds.
@inline opt(::TestOperatorPositive) = max
@inline opt(::TestOperatorNegative) = min
# Warning: I'm assuming all operators are "closed" (= not strict, like >= and <=)
@inline evaluate_thresh_decision(::TestOperatorPositive, t::T, gamma::T) where {T} = (t <= gamma)
@inline evaluate_thresh_decision(::TestOperatorNegative, t::T, gamma::T) where {T} = (t >= gamma)
# Modal threshold: fold the propositional thresholds of all worlds accessible
# from `w` through `relation` with the operator's aggregation function
# (`max` for positive operators, `min` for negative ones), starting from the
# operator's neutral element.
compute_modal_gamma(test_operator::Union{TestOperatorPositive,TestOperatorNegative}, w::WorldType, relation::AbstractRelation, channel::DimensionalChannel{T,N}) where {WorldType<:AbstractWorld,T,N} =
    reduce(
        opt(test_operator),
        (computePropositionalThreshold(test_operator, w2, channel) for w2 in accessibles(channel, [w], relation));
        init = bottom(test_operator, T),
    )
# Modal counterpart of `computePropositionalThresholdDual`: aggregate, over all
# worlds accessible from `w` through `relation`, the per-world
# (primal, dual) threshold pair. Mirroring `compute_modal_gamma`, the primal
# (positive-operator) component is max-aggregated and the dual
# (negative-operator) component is min-aggregated.
computeModalThresholdDual(test_operator::TestOperatorPositive, w::WorldType, relation::AbstractRelation, channel::DimensionalChannel{T,N}) where {WorldType<:AbstractWorld,T,N} = begin
    worlds = accessibles(channel, [w], relation)
    extr = (typemin(T),typemax(T))
    for w2 in worlds
        e = computePropositionalThresholdDual(test_operator, w2, channel)
        # Fix: with init (typemin, typemax), the components must be max- and
        # min-accumulated respectively. The previous (min, max) accumulation
        # left `extr` stuck at (typemin(T), typemax(T)) for any input.
        extr = (max(extr[1],e[1]), min(extr[2],e[2]))
    end
    extr
end
# One modal threshold per test operator, all relative to the same (w, relation).
computeModalThresholdMany(test_ops::Vector{<:TestOperator}, w::WorldType, relation::AbstractRelation, channel::DimensionalChannel{T,N}) where {WorldType<:AbstractWorld,T,N} =
    map(test_op -> compute_modal_gamma(test_op, w, relation, channel), test_ops)
################################################################################
################################################################################
# ⪴ and ⪳, that is, "*all* of the values on this world are at least, or at most ..."
# Singleton operators ⪴ ("all values ≥ threshold") and ⪳ ("all values ≤ threshold").
struct CanonicalFeatureGeq <: TestOperatorPositive end; const canonical_geq = CanonicalFeatureGeq();
struct CanonicalFeatureLeq <: TestOperatorNegative end; const canonical_leq = CanonicalFeatureLeq();
# ⪴ and ⪳ are each other's dual.
dual_test_operator(::CanonicalFeatureGeq) = canonical_leq
dual_test_operator(::CanonicalFeatureLeq) = canonical_geq
# TODO introduce singleton design pattern for these constants
# Canonical representative for threshold enumeration (always ⪴ for this pair).
primary_test_operator(x::CanonicalFeatureGeq) = canonical_geq # x
primary_test_operator(x::CanonicalFeatureLeq) = canonical_geq # dual_test_operator(x)
siblings(::CanonicalFeatureGeq) = []
siblings(::CanonicalFeatureLeq) = []
Base.show(io::IO, test_operator::CanonicalFeatureGeq) = print(io, "⪴")
Base.show(io::IO, test_operator::CanonicalFeatureLeq) = print(io, "⪳")
# Propositional threshold for ⪴ on world `w`: the *minimum* value over the
# world's channel restriction (the largest a such that "all values ≥ a" holds).
@inline computePropositionalThreshold(::CanonicalFeatureGeq, w::AbstractWorld, channel::DimensionalChannel{T,N}) where {T,N} = begin
    minimum(ch_readWorld(w,channel))
end
# Propositional threshold for ⪳ on world `w`: the *maximum* value over the
# world's channel restriction (the smallest a such that "all values ≤ a" holds).
@inline computePropositionalThreshold(::CanonicalFeatureLeq, w::AbstractWorld, channel::DimensionalChannel{T,N}) where {T,N} = begin
    maximum(ch_readWorld(w,channel))
end
# Thresholds for ⪴ and its dual ⪳ at once: (minimum, maximum) of the world's values.
@inline computePropositionalThresholdDual(::CanonicalFeatureGeq, w::AbstractWorld, channel::DimensionalChannel{T,N}) where {T,N} = extrema(ch_readWorld(w,channel))
# ⪴ decision: true iff *every* value in the world's channel restriction is
# at least `threshold` (short-circuits on the first violation; true when empty).
@inline test_decision(test_operator::CanonicalFeatureGeq, w::AbstractWorld, channel::DimensionalChannel{T,N}, threshold::Number) where {T,N} =
    all(x -> x >= threshold, ch_readWorld(w,channel))
# ⪳ decision: true iff *every* value in the world's channel restriction is
# at most `threshold` (short-circuits on the first violation; true when empty).
@inline test_decision(test_operator::CanonicalFeatureLeq, w::AbstractWorld, channel::DimensionalChannel{T,N}, threshold::Number) where {T,N} =
    all(x -> x <= threshold, ch_readWorld(w,channel))
################################################################################
################################################################################
export canonical_geq_95, canonical_geq_90, canonical_geq_85, canonical_geq_80, canonical_geq_75, canonical_geq_70, canonical_geq_60,
canonical_leq_95, canonical_leq_90, canonical_leq_85, canonical_leq_80, canonical_leq_75, canonical_leq_70, canonical_leq_60
# ⪴_α and ⪳_α, that is, "*at least α⋅100 percent* of the values on this world are at least, or at most ..."
# Softened ⪴: "at least alpha⋅100% of the values are ≥ threshold".
# The constructor requires alpha strictly inside (0, 1).
struct CanonicalFeatureGeqSoft <: TestOperatorPositive
    alpha :: AbstractFloat
    CanonicalFeatureGeqSoft(a::T) where {T<:Real} = (a > 0 && a < 1) ? new(a) : throw_n_log("Invalid instantiation for test operator: CanonicalFeatureGeqSoft($(a))")
end;
# Softened ⪳: "at least alpha⋅100% of the values are ≤ threshold".
struct CanonicalFeatureLeqSoft <: TestOperatorNegative
    alpha :: AbstractFloat
    CanonicalFeatureLeqSoft(a::T) where {T<:Real} = (a > 0 && a < 1) ? new(a) : throw_n_log("Invalid instantiation for test operator: CanonicalFeatureLeqSoft($(a))")
end;
# Pre-built soft operators at common α levels, constructed from exact rationals
# (`p//100` is the same value as `Rational(p, 100)`); the struct constructor
# converts them to floats on storage.
const canonical_geq_95 = CanonicalFeatureGeqSoft(95//100)
const canonical_geq_90 = CanonicalFeatureGeqSoft(90//100)
const canonical_geq_85 = CanonicalFeatureGeqSoft(85//100)
const canonical_geq_80 = CanonicalFeatureGeqSoft(80//100)
const canonical_geq_75 = CanonicalFeatureGeqSoft(75//100)
const canonical_geq_70 = CanonicalFeatureGeqSoft(70//100)
const canonical_geq_60 = CanonicalFeatureGeqSoft(60//100)

const canonical_leq_95 = CanonicalFeatureLeqSoft(95//100)
const canonical_leq_90 = CanonicalFeatureLeqSoft(90//100)
const canonical_leq_85 = CanonicalFeatureLeqSoft(85//100)
const canonical_leq_80 = CanonicalFeatureLeqSoft(80//100)
const canonical_leq_75 = CanonicalFeatureLeqSoft(75//100)
const canonical_leq_70 = CanonicalFeatureLeqSoft(70//100)
const canonical_leq_60 = CanonicalFeatureLeqSoft(60//100)
# Accessor for the softness level α ∈ (0,1) of a soft test operator.
alpha(x::CanonicalFeatureGeqSoft) = x.alpha
alpha(x::CanonicalFeatureLeqSoft) = x.alpha
# dual_test_operator(x::CanonicalFeatureGeqSoft) = TestOpNone
# dual_test_operator(x::CanonicalFeatureLeqSoft) = TestOpNone
# TODO The dual_test_operators for CanonicalFeatureGeqSoft(alpha) is TestOpLeSoft(1-alpha), which is not defined yet.
# Define it, together with their dual_test_operator and computePropositionalThresholdDual
# dual_test_operator(x::CanonicalFeatureGeqSoft) = throw_n_log("If you use $(x), need to write computeModalThresholdDual for the primal test operator.")
# dual_test_operator(x::CanonicalFeatureLeqSoft) = throw_n_log("If you use $(x), need to write computeModalThresholdDual for the primal test operator.")
# Primal operator of a soft operator: the Geq variant is its own primal.
primary_test_operator(x::CanonicalFeatureGeqSoft) = x
# NOTE(review): `dual_test_operator` for CanonicalFeatureLeqSoft is only present
# as commented-out definitions above — confirm it is defined elsewhere, otherwise
# calling this method raises a MethodError.
primary_test_operator(x::CanonicalFeatureLeqSoft) = dual_test_operator(x)
# All predefined soft operators, interleaving Geq/Leq at each α level. This
# vector also determines sibling and canonical-sort ordering downstream.
# NOTE(review): the 80 pair appears before the 85 pair, breaking the otherwise
# descending-α ordering — confirm whether this is intentional.
const SoftenedOperators = [
    canonical_geq_95, canonical_leq_95,
    canonical_geq_90, canonical_leq_90,
    canonical_geq_80, canonical_leq_80,
    canonical_geq_85, canonical_leq_85,
    canonical_geq_75, canonical_leq_75,
    canonical_geq_70, canonical_leq_70,
    canonical_geq_60, canonical_leq_60,
]
siblings(x::Union{CanonicalFeatureGeqSoft,CanonicalFeatureLeqSoft}) = SoftenedOperators
# Render the α level as a subscripted percentage label for a soft operator.
# `alpha(op)` is stored as a float, so `alpha(op)*100` can carry binary round-off
# (e.g. `0.85*100 == 84.99999999999999`), which the plain rstrip-based formatting
# would print verbatim; rounding to 2 decimal digits first keeps labels clean
# (e.g. "85", "85.5") without changing exact cases like 95 -> "95".
_soft_alpha_label(op) = subscriptnumber(rstrip(rstrip(string(round(alpha(op)*100, digits=2)), '0'), '.'))

# Display soft operators as e.g. "⪴₉₅" and "⪳₇₀".
Base.show(io::IO, test_operator::CanonicalFeatureGeqSoft) = print(io, "⪴" * _soft_alpha_label(test_operator))
Base.show(io::IO, test_operator::CanonicalFeatureLeqSoft) = print(io, "⪳" * _soft_alpha_label(test_operator))
# TODO improved version for Rational numbers
# TODO check
# Select the α-rank order statistic from `vals` (mutates `vals` via partialsort!).
# For ⪴_α: the k-th *largest* value, k = ceil(α⋅length) — the largest threshold
# such that at least an α fraction of values is >= it.
@inline test_op_partialsort!(test_op::CanonicalFeatureGeqSoft, vals::Vector{T}) where {T} =
    partialsort!(vals,ceil(Int, alpha(test_op)*length(vals)); rev=true)
# For ⪳_α: the k-th *smallest* value (same k), the dual selection.
@inline test_op_partialsort!(test_op::CanonicalFeatureLeqSoft, vals::Vector{T}) where {T} =
    partialsort!(vals,ceil(Int, alpha(test_op)*length(vals)))
# Soft propositional threshold on a world: the α-rank order statistic of the
# world's channel values, as selected by `test_op_partialsort!` for `test_op`.
# `vec` flattens the (freshly sliced) channel so the in-place partial sort only
# touches a temporary buffer.
@inline function computePropositionalThreshold(test_op::Union{CanonicalFeatureGeqSoft,CanonicalFeatureLeqSoft}, w::AbstractWorld, channel::DimensionalChannel{T,N}) where {T,N}
    buf = vec(ch_readWorld(w, channel))
    return test_op_partialsort!(test_op, buf)
end
# @inline computePropositionalThresholdDual(test_op::CanonicalFeatureGeqSoft, w::AbstractWorld, channel::DimensionalChannel{T,N}) where {T,N} = begin
# vals = vec(ch_readWorld(w,channel))
# xmin = test_op_partialsort!(test_op,vec(ch_readWorld(w,channel)))
# xmin = partialsort!(vals,ceil(Int, alpha(test_op)*length(vals)); rev=true)
# xmax = partialsort!(vals,ceil(Int, (alpha(test_op))*length(vals)))
# xmin,xmax
# end
# Thresholds for several test operators over the same world, sharing one buffer.
# Returns a *lazy* generator: each element is computed on demand, and every
# `test_op_partialsort!` call permutes the shared `vals` buffer in place. The
# selected order statistics are still correct (a k-th order statistic is
# invariant under permutation), but callers should be aware the buffer is reused.
@inline computePropositionalThresholdMany(test_ops::Vector{<:TestOperator}, w::AbstractWorld, channel::DimensionalChannel{T,N}) where {T,N} = begin
    vals = vec(ch_readWorld(w,channel))
    (test_op_partialsort!(test_op,vals) for test_op in test_ops)
end
# Soft ⪴_α decision: true iff at least an α fraction of the world's values
# is >= threshold. Uses `count` with a predicate instead of a manual
# accumulation loop (resolves the block's TODO); result is identical since
# `count` returns the integer number of matches.
@inline test_decision(test_operator::CanonicalFeatureGeqSoft, w::AbstractWorld, channel::DimensionalChannel{T,N}, threshold::Number) where {T,N} = begin
    vals = ch_readWorld(w,channel)
    (count(x -> x >= threshold, vals) / length(vals)) >= test_operator.alpha
end
# Soft ⪳_α decision: true iff at least an α fraction of the world's values
# is <= threshold. `count` replaces the manual accumulation loop (resolves the
# block's TODO) with identical semantics.
@inline test_decision(test_operator::CanonicalFeatureLeqSoft, w::AbstractWorld, channel::DimensionalChannel{T,N}, threshold::Number) where {T,N} = begin
    vals = ch_readWorld(w,channel)
    (count(x -> x <= threshold, vals) / length(vals)) >= test_operator.alpha
end
################################################################################
################################################################################
# Catalogues of available test operators. All three currently hold the same
# contents (the two canonical operators followed by all softened ones); they are
# kept as separate names because they serve distinct roles downstream
# (low-level enumeration, ordered enumeration, and canonical sort order).
const all_lowlevel_test_operators = [
    canonical_geq, canonical_leq,
    SoftenedOperators...
]

const all_ordered_test_operators = [
    canonical_geq, canonical_leq,
    SoftenedOperators...
]
# Canonical ordering used by `sort_test_operators!` below.
const all_test_operators_order = [
    canonical_geq, canonical_leq,
    SoftenedOperators...
]
# Return the operators of `x` re-ordered according to `all_test_operators_order`
# (via `intersect`, which also drops duplicates and operators not in the
# canonical list).
# NOTE(review): despite the `!` suffix this function does NOT mutate `x`; it
# returns a new vector. The name is kept for API compatibility.
sort_test_operators!(x::Vector{TO}) where {TO<:TestOperator} = begin
    intersect(all_test_operators_order, x)
end
################################################################################
################################################################################
# Dispatch bridge: evaluate an ordering test operator on a dimensional dataset
# by delegating to the aggregator-based method with the operator's existential
# aggregator.
function test_decision(
    X::DimensionalDataset{T},
    i_sample::Integer,
    w::AbstractWorld,
    feature::AbstractFeature,
    test_operator::OrderingTestOperator,
    threshold::T,
) where {T}
    return test_decision(X, i_sample, w, feature, existential_aggregator(test_operator), threshold)
end
# NOTE(review): the method below was syntactically invalid Julia — a non-final
# Vararg parameter (`feature::AbstractFeature... ,` before `aggregator`) and an
# unparseable body line — which made this file fail to load at all. It is
# preserved here, commented out, until a correct implementation (fetching the
# feature's values over the world from the dataset and short-circuit testing
# them) is written.
# function test_decision(
#         X::DimensionalDataset{T},
#         i_sample::Integer,
#         w::AbstractWorld,
#         feature::AbstractFeature... ,
#         aggregator::typeof(maximum),
#         threshold::T) where {T}
#     values = get_values ... (X, i_sample, w, feature.i_attribute...) ch_readWorld(w,channel)
#     all_broadcast_sc(values, test_operator, threshold)
# end
# Delegate the decision test on an interpreted modal dataset to its wrapped
# dimensional domain (`X.domain`); all other arguments pass through unchanged.
function test_decision(
    X::InterpretedModalDataset{T},
    i_sample::Integer,
    w::AbstractWorld,
    feature::AbstractFeature,
    test_operator::TestOperatorFun,
    threshold::T,
) where {T}
    return test_decision(X.domain, i_sample, w, feature, test_operator, threshold)
end
############################################################################################
############################################################################################
# Propositional threshold for an arbitrary feature: read the world's slice of
# the instance (one dimension lower than the instance itself) and apply the
# feature's computation to it. The `::T` assertion pins the scalar result type.
function computePropositionalThreshold(feature::AbstractFeature, w::AbstractWorld, instance::DimensionalInstance{T,N}) where {T,N}
    compute_feature(feature, inst_readWorld(w, instance)::DimensionalChannel{T,N-1})::T
end
# Dual modal threshold over a union-of-relations: delegates to the propositional
# dual on the same world/channel.
# NOTE(review): `relsTuple` is not bound in the method's `where` clause — it must
# resolve as a global at definition time; confirm it is defined upstream.
computeModalThresholdDual(test_operator::TestOperatorFun, w::WorldType, relation::R where R<:_UnionOfRelations{relsTuple}, channel::DimensionalChannel{T,N}) where {WorldType<:AbstractWorld,T,N} =
    computePropositionalThresholdDual(test_operator, w, channel)
# NOTE(review): stray top-level expression whose value is discarded — looks like
# leftover scratch code; it is a load-time error if `relsTuple` is undefined.
fieldtypes(relsTuple)

# Modal gamma over a union-of-relations: delegates to the propositional
# threshold on the same world/channel.
compute_modal_gamma(test_operator::TestOperatorFun, w::WorldType, relation::R where R<:_UnionOfRelations{relsTuple}, channel::DimensionalChannel{T,N}) where {WorldType<:AbstractWorld,T,N} =
    computePropositionalThreshold(test_operator, w, channel)
# NOTE(review): same stray expression as above — likely removable scratch code.
fieldtypes(relsTuple)
#=
# needed for GAMMAS
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprMax{Interval}, channel::DimensionalChannel{T,1}) where {T} =
reverse(extrema(ch_readWorld(repr.w, channel)))::NTuple{2,T}
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprMin{Interval}, channel::DimensionalChannel{T,1}) where {T} =
extrema(ch_readWorld(repr.w, channel))::NTuple{2,T}
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprVal{Interval}, channel::DimensionalChannel{T,1}) where {T} =
(channel[repr.w.x],channel[repr.w.x])::NTuple{2,T}
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprNone{Interval}, channel::DimensionalChannel{T,1}) where {T} =
(typemin(T),typemax(T))::NTuple{2,T}
yieldRepr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, repr::_ReprMax{Interval}, channel::DimensionalChannel{T,1}) where {T} =
maximum(ch_readWorld(repr.w, channel))::T
yieldRepr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, repr::_ReprMin{Interval}, channel::DimensionalChannel{T,1}) where {T} =
minimum(ch_readWorld(repr.w, channel))::T
yieldRepr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, repr::_ReprVal{Interval}, channel::DimensionalChannel{T,1}) where {T} =
channel[repr.w.x]::T
yieldRepr(test_operator::CanonicalFeatureGeq, repr::_ReprNone{Interval}, channel::DimensionalChannel{T,1}) where {T} =
typemin(T)::T
yieldRepr(test_operator::CanonicalFeatureLeq, repr::_ReprNone{Interval}, channel::DimensionalChannel{T,1}) where {T} =
typemax(T)::T
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_RelationGlob, X::Integer) = _ReprMax(Interval(1,X+1))
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_RelationGlob, X::Integer) = _ReprMin(Interval(1,X+1))
# TODO optimize relationGlob
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, r::R where R<:AbstractRelation, channel::DimensionalChannel{T,1}) where {T} =
yieldReprs(test_operator, enum_acc_repr(test_operator, w, r, size(channel)...), channel)
computeModalThreshold(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval, r::R where R<:AbstractRelation, channel::DimensionalChannel{T,1}) where {T} =
yieldRepr(test_operator, enum_acc_repr(test_operator, w, r, size(channel)...), channel)
# TODO optimize relationGlob?
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_RelationGlob, channel::DimensionalChannel{T,1}) where {T} = begin
# # X = length(channel)
# # println("Check!")
# # println(test_operator)
# # println(w)
# # println(relation)
# # println(channel)
# # println(computePropositionalThresholdDual(test_operator, Interval(1,X+1), channel))
# # readline()
# # computePropositionalThresholdDual(test_operator, Interval(1,X+1), channel)
# reverse(extrema(channel))
# end
# computeModalThreshold(test_operator::CanonicalFeatureGeq, w::Interval, ::_RelationGlob, channel::DimensionalChannel{T,1}) where {T} = begin
# # TODO optimize this by replacing readworld with channel[1:X]...
# # X = length(channel)
# # maximum(ch_readWorld(Interval(1,X+1),channel))
# maximum(channel)
# end
# computeModalThreshold(test_operator::CanonicalFeatureLeq, w::Interval, ::_RelationGlob, channel::DimensionalChannel{T,1}) where {T} = begin
# # TODO optimize this by replacing readworld with channel[1:X]...
# # X = length(channel)
# # minimum(ch_readWorld(Interval(1,X+1),channel))
# minimum(channel)
# end
ch_readWorld(w::Interval, channel::DimensionalChannel{T,1}) where {T} = channel[w.x:w.y-1]
=#
#=
# needed for GAMMAS
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprMax{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
reverse(extrema(ch_readWorld(repr.w, channel)))::NTuple{2,T}
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprMin{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
extrema(ch_readWorld(repr.w, channel))::NTuple{2,T}
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprVal{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
(channel[repr.w.x.x, repr.w.y.x],channel[repr.w.x.x, repr.w.y.x])::NTuple{2,T}
yieldReprs(test_operator::CanonicalFeatureGeq, repr::_ReprNone{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
(typemin(T),typemax(T))::NTuple{2,T}
yieldRepr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, repr::_ReprMax{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
maximum(ch_readWorld(repr.w, channel))::T
yieldRepr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, repr::_ReprMin{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
minimum(ch_readWorld(repr.w, channel))::T
yieldRepr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, repr::_ReprVal{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
channel[repr.w.x.x, repr.w.y.x]::T
yieldRepr(test_operator::CanonicalFeatureGeq, repr::_ReprNone{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
typemin(T)::T
yieldRepr(test_operator::CanonicalFeatureLeq, repr::_ReprNone{Interval2D}, channel::DimensionalChannel{T,2}) where {T} =
typemax(T)::T
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, ::_RelationGlob, X::Integer, Y::Integer) = _ReprMax(Interval2D(Interval(1,X+1), Interval(1,Y+1)))
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, ::_RelationGlob, X::Integer, Y::Integer) = _ReprMin(Interval2D(Interval(1,X+1), Interval(1,Y+1)))
# TODO write only one ExtremeModal/ExtremaModal
# TODO optimize relationGlob
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::R where R<:AbstractRelation, channel::DimensionalChannel{T,2}) where {T} = begin
# if (channel == [412 489 559 619 784; 795 771 1317 854 1256; 971 874 878 1278 560] && w.x.x==1 && w.x.y==3 && w.y.x==3 && w.y.y==4)
# println(enum_acc_repr(test_operator, w, r, size(channel)...))
# readline()
# end
yieldReprs(test_operator, enum_acc_repr(test_operator, w, r, size(channel)...), channel)
end
compute_modal_gamma(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval2D, r::R where R<:AbstractRelation, channel::DimensionalChannel{T,2}) where {T} =
yieldRepr(test_operator, enum_acc_repr(test_operator, w, r, size(channel)...), channel)
# channel = [1,2,3,2,8,349,0,830,7290,298,20,29,2790,27,90279,270,2722,79072,0]
# w = ModalLogic.Interval(3,9)
# # w = ModalLogic.Interval(3,4)
# for relation in ModalLogic.IARelations
# ModalLogic.computeModalThresholdDual(canonical_geq, w, relation, channel)
# end
# channel2 = randn(3,4)
# channel2[1:3,1]
# channel2[1:3,2]
# channel2[1:3,3]
# channel2[1:3,4]
# vals=channel2
# mapslices(maximum, vals, dims=1)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, ::_RelationGlob, channel::DimensionalChannel{T,2}) where {T} = begin
# # X = size(channel, 1)
# # Y = size(channel, 2)
# # println("Check!")
# # println(test_operator)
# # println(w)
# # println(relation)
# # println(channel)
# # println(computePropositionalThresholdDual(test_operator, Interval2D(Interval(1,X+1), Interval(1, Y+1)), channel))
# # readline()
# # computePropositionalThresholdDual(test_operator, Interval2D(Interval(1,X+1), Interval(1, Y+1)), channel)
# reverse(extrema(channel))
# end
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, ::_RelationGlob, channel::DimensionalChannel{T,2}) where {T} = begin
# # TODO optimize this by replacing readworld with channel[1:X]...
# # X = size(channel, 1)
# # Y = size(channel, 2)
# # maximum(channel[1:X,1:Y])
# maximum(channel)
# end
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, ::_RelationGlob, channel::DimensionalChannel{T,2}) where {T} = begin
# # TODO optimize this by replacing readworld with channel[1:X]...
# # X = size(channel, 1)
# # Y = size(channel, 2)
# # println(channel)
# # println(w)
# # println(minimum(channel[1:X,1:Y]))
# # readline()
# # minimum(channel[1:X,1:Y])
# minimum(channel)
# end
@inline ch_readWorld(w::Interval2D, channel::DimensionalChannel{T,2}) where {T} = channel[w.x.x:w.x.y-1,w.y.x:w.y.y-1]
=#
# Other options:
# accessibles2_1_2(S::AbstractWorldSet{Interval}, ::_IA_L, X::Integer) =
# IterTools.imap(Interval, _accessibles(Base.argmin((w.y for w in S)), IA_L, X))
# accessibles2_1_2(S::AbstractWorldSet{Interval}, ::_IA_Li, X::Integer) =
# IterTools.imap(Interval, _accessibles(Base.argmax((w.x for w in S)), IA_Li, X))
# accessibles2_2(S::AbstractWorldSet{Interval}, ::_IA_L, X::Integer) = begin
# m = argmin(map((w)->w.y, S))
# IterTools.imap(Interval, _accessibles([w for (i,w) in enumerate(S) if i == m][1], IA_L, X))
# end
# accessibles2_2(S::AbstractWorldSet{Interval}, ::_IA_Li, X::Integer) = begin
# m = argmax(map((w)->w.x, S))
# IterTools.imap(Interval, _accessibles([w for (i,w) in enumerate(S) if i == m][1], IA_Li, X))
# end
# # This makes sense if we have 2-Tuples instead of intervals
# function snd((a,b)::Tuple) b end
# function fst((a,b)::Tuple) a end
# accessibles2_1(S::AbstractWorldSet{Interval}, ::_IA_L, X::Integer) =
# IterTools.imap(Interval,
# _accessibles(S[argmin(map(snd, S))], IA_L, X)
# )
# accessibles2_1(S::AbstractWorldSet{Interval}, ::_IA_Li, X::Integer) =
# IterTools.imap(Interval,
# _accessibles(S[argmax(map(fst, S))], IA_Li, X)
# )
#=
# TODO parametrize on the test_operator. These are wrong anyway...
# Note: these conditions are the ones that make a modal_step inexistent
enum_acc_repr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval, ::_IA_A, X::Integer) = (w.y < X+1) ? _ReprVal(Interval(w.y, w.y+1) ) : _ReprNone{Interval}() # [Interval(w.y, X+1)] : Interval[]
enum_acc_repr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval, ::_IA_Ai, X::Integer) = (1 < w.x) ? _ReprVal(Interval(w.x-1, w.x) ) : _ReprNone{Interval}() # [Interval(1, w.x)] : Interval[]
enum_acc_repr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval, ::_IA_B, X::Integer) = (w.x < w.y-1) ? _ReprVal(Interval(w.x, w.x+1) ) : _ReprNone{Interval}() # [Interval(w.x, w.y-1)] : Interval[]
enum_acc_repr(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval, ::_IA_E, X::Integer) = (w.x+1 < w.y) ? _ReprVal(Interval(w.y-1, w.y) ) : _ReprNone{Interval}() # [Interval(w.x+1, w.y)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_L, X::Integer) = (w.y+1 < X+1) ? _ReprMax(Interval(w.y+1, X+1) ) : _ReprNone{Interval}() # [Interval(w.y+1, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Li, X::Integer) = (1 < w.x-1) ? _ReprMax(Interval(1, w.x-1) ) : _ReprNone{Interval}() # [Interval(1, w.x-1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_D, X::Integer) = (w.x+1 < w.y-1) ? _ReprMax(Interval(w.x+1, w.y-1) ) : _ReprNone{Interval}() # [Interval(w.x+1, w.y-1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_L, X::Integer) = (w.y+1 < X+1) ? _ReprMin(Interval(w.y+1, X+1) ) : _ReprNone{Interval}() # [Interval(w.y+1, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Li, X::Integer) = (1 < w.x-1) ? _ReprMin(Interval(1, w.x-1) ) : _ReprNone{Interval}() # [Interval(1, w.x-1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_D, X::Integer) = (w.x+1 < w.y-1) ? _ReprMin(Interval(w.x+1, w.y-1) ) : _ReprNone{Interval}() # [Interval(w.x+1, w.y-1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Bi, X::Integer) = (w.y < X+1) ? _ReprMin(Interval(w.x, w.y+1) ) : _ReprNone{Interval}() # [Interval(w.x, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Ei, X::Integer) = (1 < w.x) ? _ReprMin(Interval(w.x-1, w.y) ) : _ReprNone{Interval}() # [Interval(1, w.y)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Di, X::Integer) = (1 < w.x && w.y < X+1) ? _ReprMin(Interval(w.x-1, w.y+1) ) : _ReprNone{Interval}() # [Interval(1, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_O, X::Integer) = (w.x+1 < w.y && w.y < X+1) ? _ReprMin(Interval(w.y-1, w.y+1) ) : _ReprNone{Interval}() # [Interval(w.x+1, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Oi, X::Integer) = (1 < w.x && w.x+1 < w.y) ? _ReprMin(Interval(w.x-1, w.x+1) ) : _ReprNone{Interval}() # [Interval(1, w.y-1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Bi, X::Integer) = (w.y < X+1) ? _ReprMax(Interval(w.x, w.y+1) ) : _ReprNone{Interval}() # [Interval(w.x, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Ei, X::Integer) = (1 < w.x) ? _ReprMax(Interval(w.x-1, w.y) ) : _ReprNone{Interval}() # [Interval(1, w.y)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Di, X::Integer) = (1 < w.x && w.y < X+1) ? _ReprMax(Interval(w.x-1, w.y+1) ) : _ReprNone{Interval}() # [Interval(1, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_O, X::Integer) = (w.x+1 < w.y && w.y < X+1) ? _ReprMax(Interval(w.y-1, w.y+1) ) : _ReprNone{Interval}() # [Interval(w.x+1, X+1)] : Interval[]
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Oi, X::Integer) = (1 < w.x && w.x+1 < w.y) ? _ReprMax(Interval(w.x-1, w.x+1) ) : _ReprNone{Interval}() # [Interval(1, w.y-1)] : Interval[]
=#
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_A, channel::DimensionalChannel{T,1}) where {T} =
# (w.y < length(channel)+1) ? (channel[w.y],channel[w.y]) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_A, channel::DimensionalChannel{T,1}) where {T} =
# (w.y < length(channel)+1) ? channel[w.y] : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_A, channel::DimensionalChannel{T,1}) where {T} =
# (w.y < length(channel)+1) ? channel[w.y] : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Ai, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x) ? (channel[w.x-1],channel[w.x-1]) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Ai, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x) ? channel[w.x-1] : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Ai, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x) ? channel[w.x-1] : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_L, channel::DimensionalChannel{T,1}) where {T} =
# (w.y+1 < length(channel)+1) ? reverse(extrema(channel[w.y+1:length(channel)])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_L, channel::DimensionalChannel{T,1}) where {T} =
# (w.y+1 < length(channel)+1) ? maximum(channel[w.y+1:length(channel)]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_L, channel::DimensionalChannel{T,1}) where {T} =
# (w.y+1 < length(channel)+1) ? minumum(channel[w.y+1:length(channel)]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Li, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x-1) ? reverse(extrema(channel[1:w.x-2])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Li, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x-1) ? maximum(channel[1:w.x-2]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Li, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x-1) ? minumum(channel[1:w.x-2]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_B, channel::DimensionalChannel{T,1}) where {T} =
# (w.x < w.y-1) ? (channel[w.x],channel[w.x]) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_B, channel::DimensionalChannel{T,1}) where {T} =
# (w.x < w.y-1) ? channel[w.x] : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_B, channel::DimensionalChannel{T,1}) where {T} =
# (w.x < w.y-1) ? channel[w.x] : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Bi, channel::DimensionalChannel{T,1}) where {T} =
# (w.y < length(channel)+1) ? (minimum(channel[w.x:w.y-1+1]),maximum(channel[w.x:w.y-1+1])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Bi, channel::DimensionalChannel{T,1}) where {T} =
# (w.y < length(channel)+1) ? minimum(channel[w.x:w.y-1+1]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Bi, channel::DimensionalChannel{T,1}) where {T} =
# (w.y < length(channel)+1) ? maximum(channel[w.x:w.y-1+1]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_E, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y) ? (channel[w.y-1],channel[w.y-1]) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_E, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y) ? channel[w.y-1] : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_E, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y) ? channel[w.y-1] : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Ei, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x) ? (minimum(channel[w.x-1:w.y-1]),maximum(channel[w.x-1:w.y-1])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Ei, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x) ? minimum(channel[w.x-1:w.y-1]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Ei, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x) ? maximum(channel[w.x-1:w.y-1]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_D, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y-1) ? reverse(extrema(channel[w.x+1:w.y-1-1])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_D, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y-1) ? maximum(channel[w.x+1:w.y-1-1]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_D, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y-1) ? minumum(channel[w.x+1:w.y-1-1]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Di, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x && w.y < length(channel)+1) ? (minimum(channel[w.x-1:w.y-1+1]),maximum(channel[w.x-1:w.y-1+1])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Di, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x && w.y < length(channel)+1) ? minimum(channel[w.x-1:w.y-1+1]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Di, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x && w.y < length(channel)+1) ? maximum(channel[w.x-1:w.y-1+1]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_O, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y && w.y < length(channel)+1) ? (minimum(channel[w.y-1:w.y-1+1]),maximum(channel[w.y-1:w.y-1+1])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_O, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y && w.y < length(channel)+1) ? minimum(channel[w.y-1:w.y-1+1]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_O, channel::DimensionalChannel{T,1}) where {T} =
# (w.x+1 < w.y && w.y < length(channel)+1) ? maximum(channel[w.y-1:w.y-1+1]) : typemin(T)
# computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Oi, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x && w.x+1 < w.y) ? (minimum(channel[w.x-1:w.x]),maximum(channel[w.x-1:w.x])) : (typemax(T),typemin(T))
# compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval, ::_IA_Oi, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x && w.x+1 < w.y) ? minimum(channel[w.x-1:w.x]) : typemax(T)
# compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval, ::_IA_Oi, channel::DimensionalChannel{T,1}) where {T} =
# (1 < w.x && w.x+1 < w.y) ? maximum(channel[w.x-1:w.x]) : typemin(T)
# enum_acc_repr for _IA2D_URelations
# 3 operator categories for the 13+1 relations
# The 13+1 interval relations fall into 3 categories for representative-based
# threshold computation on Interval2D worlds:
# relations whose representative is a maximizing sub-world,
const _IA2DRelMaximizer = Union{_RelationGlob,_IA_L,_IA_Li,_IA_D}
# relations whose representative is a minimizing sub-world,
const _IA2DRelMinimizer = Union{_RelationId,_IA_O,_IA_Oi,_IA_Bi,_IA_Ei,_IA_Di}
# and relations whose representative is a single value.
const _IA2DRelSingleVal = Union{_IA_A,_IA_Ai,_IA_B,_IA_E}
#=
################################################################################
################################################################################
# TODO remove (needed for GAMMAS)
# Utility type for enhanced computation of thresholds
abstract type _ReprTreatment end
struct _ReprFake{WorldType<:AbstractWorld} <: _ReprTreatment w :: WorldType end
struct _ReprMax{WorldType<:AbstractWorld} <: _ReprTreatment w :: WorldType end
struct _ReprMin{WorldType<:AbstractWorld} <: _ReprTreatment w :: WorldType end
struct _ReprVal{WorldType<:AbstractWorld} <: _ReprTreatment w :: WorldType end
struct _ReprNone{WorldType<:AbstractWorld} <: _ReprTreatment end
# enum_acc_repr(::CanonicalFeatureGeq, w::WorldType, ::_RelationId, XYZ::Vararg{Integer,N}) where {WorldType<:AbstractWorld,N} = _ReprMin(w)
# enum_acc_repr(::CanonicalFeatureLeq, w::WorldType, ::_RelationId, XYZ::Vararg{Integer,N}) where {WorldType<:AbstractWorld,N} = _ReprMax(w)
@inline enum_acc_repr2D(test_operator::TestOperator, w::Interval2D, rx::R1 where R1<:AbstractRelation, ry::R2 where R2<:AbstractRelation, X::Integer, Y::Integer, _ReprConstructor::Type{rT}) where {rT<:_ReprTreatment} = begin
x = enum_acc_repr(test_operator, w.x, rx, X)
# println(x)
if x == _ReprNone{Interval}()
return _ReprNone{Interval2D}()
end
y = enum_acc_repr(test_operator, w.y, ry, Y)
# println(y)
if y == _ReprNone{Interval}()
return _ReprNone{Interval2D}()
end
return _ReprConstructor(Interval2D(x.w, y.w))
end
# 3*3 = 9 cases ((13+1)^2 = 196 relations)
# Maximizer operators
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMaximizer,R2<:_IA2DRelMaximizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMax)
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMinimizer,R2<:_IA2DRelMinimizer}, X::Integer, Y::Integer) = begin
# println(enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin))
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin)
end
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelSingleVal,R2<:_IA2DRelSingleVal}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprVal)
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMaximizer,R2<:_IA2DRelSingleVal}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMax)
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMinimizer,R2<:_IA2DRelSingleVal}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin)
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelSingleVal,R2<:_IA2DRelMaximizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMax)
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelSingleVal,R2<:_IA2DRelMinimizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMaximizer,R2<:_IA2DRelMaximizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMinimizer,R2<:_IA2DRelMinimizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMax)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelSingleVal,R2<:_IA2DRelSingleVal}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprVal)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMaximizer,R2<:_IA2DRelSingleVal}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMinimizer,R2<:_IA2DRelSingleVal}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMax)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelSingleVal,R2<:_IA2DRelMaximizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMin)
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelSingleVal,R2<:_IA2DRelMinimizer}, X::Integer, Y::Integer) =
enum_acc_repr2D(test_operator, w, r.x, r.y, X, Y, _ReprMax)
# The last two cases are difficult to express with enum_acc_repr, better do it at computeModalThresholdDual instead
# TODO create a dedicated min/max combination representation?
yieldMinMaxCombinations(test_operator::CanonicalFeatureGeq, productRepr::_ReprTreatment, channel::DimensionalChannel{T,2}, dims::Integer) where {T} = begin
if productRepr == _ReprNone{Interval2D}()
return typemin(T),typemax(T)
end
vals = ch_readWorld(productRepr.w, channel)
# TODO try: maximum(mapslices(minimum, vals, dims=1)),minimum(mapslices(maximum, vals, dims=1))
extr = vec(mapslices(extrema, vals, dims=dims))
# println(extr)
maxExtrema(extr)
end
yieldMinMaxCombination(test_operator::CanonicalFeatureGeq, productRepr::_ReprTreatment, channel::DimensionalChannel{T,2}, dims::Integer) where {T} = begin
if productRepr == _ReprNone{Interval2D}()
return typemin(T)
end
vals = ch_readWorld(productRepr.w, channel)
maximum(mapslices(minimum, vals, dims=dims))
end
yieldMinMaxCombination(test_operator::CanonicalFeatureLeq, productRepr::_ReprTreatment, channel::DimensionalChannel{T,2}, dims::Integer) where {T} = begin
if productRepr == _ReprNone{Interval2D}()
return typemax(T)
end
vals = ch_readWorld(productRepr.w, channel)
minimum(mapslices(maximum, vals, dims=dims))
end
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMinimizer,R2<:_IA2DRelMaximizer}, channel::DimensionalChannel{T,2}) where {T} = begin
yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, r.x, r.y, size(channel)..., _ReprFake), channel, 1)
end
compute_modal_gamma(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMinimizer,R2<:_IA2DRelMaximizer}, channel::DimensionalChannel{T,2}) where {T} = begin
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, r.x, r.y, size(channel)..., _ReprFake), channel, 1)
end
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMaximizer,R2<:_IA2DRelMinimizer}, channel::DimensionalChannel{T,2}) where {T} = begin
yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, r.x, r.y, size(channel)..., _ReprFake), channel, 2)
end
compute_modal_gamma(test_operator::Union{CanonicalFeatureGeq,CanonicalFeatureLeq}, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMaximizer,R2<:_IA2DRelMinimizer}, channel::DimensionalChannel{T,2}) where {T} = begin
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, r.x, r.y, size(channel)..., _ReprFake), channel, 2)
end
=#
# TODO: per CanonicalFeatureLeq gli operatori si invertono
# Same three-way partition of the 13+1 Interval Algebra relations as
# _IA2DRelMaximizer/_IA2DRelMinimizer/_IA2DRelSingleVal above, under shorter
# names. The TODO above notes that, for CanonicalFeatureLeq, the roles of the
# max/min categories are swapped.
const _IA2DRelMax = Union{_RelationGlob,_IA_L,_IA_Li,_IA_D}
const _IA2DRelMin = Union{_RelationId,_IA_O,_IA_Oi,_IA_Bi,_IA_Ei,_IA_Di}
const _IA2DRelVal = Union{_IA_A,_IA_Ai,_IA_B,_IA_E}
# accessibles_aggr(f::Union{SingleAttributeMin,SingleAttributeMax}, a::Union{typeof(minimum),typeof(maximum)}, w::Interval2D, r::_IA2DRel{R1,R2} where {R1<:_IA2DRelMax,R2<:_IA2DRelMax}, X::Integer) = IterTools.imap(Interval2D, Iterators.product(accessibles_aggr(f, a, w.x, rx, X), accessibles_aggr(f, a, w.y, ry, Y)))
#=
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::RCC5Relation, channel::DimensionalChannel{T,2}) where {T} = begin
maxExtrema(
map((RCC8_r)->(computeModalThresholdDual(test_operator, w, RCC8_r, channel)), RCC52RCC8Relations(r))
)
end
compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, r::RCC5Relation, channel::DimensionalChannel{T,2}) where {T} = begin
maximum(
map((RCC8_r)->(compute_modal_gamma(test_operator, w, RCC8_r, channel)), RCC52RCC8Relations(r))
)
end
compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, r::RCC5Relation, channel::DimensionalChannel{T,2}) where {T} = begin
mininimum(
map((RCC8_r)->(compute_modal_gamma(test_operator, w, RCC8_r, channel)), RCC52RCC8Relations(r))
)
end
=#
# More efficient implementations for edge cases
# ?
#=
# TODO optimize RCC5
# Virtual relation used for computing Topo_DC on Interval2D
struct _Virtual_Enlarge <: AbstractRelation end; const Virtual_Enlarge = _Virtual_Enlarge(); # Virtual_Enlarge
enlargeInterval(w::Interval, X::Integer) = Interval(max(1,w.x-1),min(w.y+1,X+1))
enum_acc_repr(test_operator::CanonicalFeatureGeq, w::Interval, ::_Virtual_Enlarge, X::Integer) = _ReprMin(enlargeInterval(w,X))
enum_acc_repr(test_operator::CanonicalFeatureLeq, w::Interval, ::_Virtual_Enlarge, X::Integer) = _ReprMax(enlargeInterval(w,X))
# Topo2D2Topo1D(::_Topo_DC) = [
# (RelationGlob , Topo_DC),
# # TODO many many others but for now let's just say...
# (Topo_DC , Virtual_Enlarge),
# ]
Topo2D2Topo1D(::_Topo_EC) = [
(Topo_EC , Topo_EC),
#
(Topo_PO , Topo_EC),
(Topo_TPP , Topo_EC),
(Topo_TPPi , Topo_EC),
(Topo_NTPP , Topo_EC),
(Topo_NTPPi , Topo_EC),
(RelationId , Topo_EC),
#
(Topo_EC , Topo_PO),
(Topo_EC , Topo_TPP),
(Topo_EC , Topo_TPPi),
(Topo_EC , Topo_NTPP),
(Topo_EC , Topo_NTPPi),
(Topo_EC , RelationId),
]
Topo2D2Topo1D(::_Topo_PO) = [
(Topo_PO , Topo_PO),
#
(Topo_PO , Topo_TPP),
(Topo_PO , Topo_TPPi),
(Topo_PO , Topo_NTPP),
(Topo_PO , Topo_NTPPi),
(Topo_PO , RelationId),
#
(Topo_TPP , Topo_PO),
(Topo_TPPi , Topo_PO),
(Topo_NTPP , Topo_PO),
(Topo_NTPPi , Topo_PO),
(RelationId , Topo_PO),
#
(Topo_TPPi , Topo_TPP),
(Topo_TPP , Topo_TPPi),
#
(Topo_NTPP , Topo_TPPi),
(Topo_TPPi , Topo_NTPP),
#
(Topo_NTPPi , Topo_TPP),
(Topo_NTPPi , Topo_NTPP),
(Topo_TPP , Topo_NTPPi),
(Topo_NTPP , Topo_NTPPi),
]
Topo2D2Topo1D(::_Topo_TPP) = [
(Topo_TPP , Topo_TPP),
#
(Topo_NTPP , Topo_TPP),
(Topo_TPP , Topo_NTPP),
#
(RelationId , Topo_TPP),
(Topo_TPP , RelationId),
#
(RelationId , Topo_NTPP),
(Topo_NTPP , RelationId),
]
Topo2D2Topo1D(::_Topo_TPPi) = [
(Topo_TPPi , Topo_TPPi),
#
(Topo_NTPPi , Topo_TPPi),
(Topo_TPPi , Topo_NTPPi),
#
(RelationId , Topo_TPPi),
(Topo_TPPi , RelationId),
#
(RelationId , Topo_NTPPi),
(Topo_NTPPi , RelationId),
]
Topo2D2Topo1D(::_Topo_NTPP) = [
(Topo_NTPP , Topo_NTPP),
]
Topo2D2Topo1D(::_Topo_NTPPi) = [
(Topo_NTPPi , Topo_NTPPi),
]
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_DC, channel::DimensionalChannel{T,2}) where {T} = begin
reprx1 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_L, size(channel)..., _ReprMax)
reprx2 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_Li, size(channel)..., _ReprMax)
repry1 = enum_acc_repr2D(test_operator, w, IA_L, Virtual_Enlarge, size(channel)..., _ReprMax)
repry2 = enum_acc_repr2D(test_operator, w, IA_Li, Virtual_Enlarge, size(channel)..., _ReprMax)
extr = yieldReprs(test_operator, reprx1, channel),
yieldReprs(test_operator, reprx2, channel),
yieldReprs(test_operator, repry1, channel),
yieldReprs(test_operator, repry2, channel)
maxExtrema(extr)
end
compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_DC, channel::DimensionalChannel{T,2}) where {T} = begin
# reprx1 = enum_acc_repr2D(test_operator, w, IA_L, RelationGlob, size(channel)..., _ReprMax)
# reprx2 = enum_acc_repr2D(test_operator, w, IA_Li, RelationGlob, size(channel)..., _ReprMax)
# repry1 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_L, size(channel)..., _ReprMax)
# repry2 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_Li, size(channel)..., _ReprMax)
reprx1 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_L, size(channel)..., _ReprMax)
reprx2 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_Li, size(channel)..., _ReprMax)
repry1 = enum_acc_repr2D(test_operator, w, IA_L, Virtual_Enlarge, size(channel)..., _ReprMax)
repry2 = enum_acc_repr2D(test_operator, w, IA_Li, Virtual_Enlarge, size(channel)..., _ReprMax)
# if channel == [819 958 594; 749 665 383; 991 493 572] && w.x.x==1 && w.x.y==2 && w.y.x==1 && w.y.y==3
# println(max(yieldRepr(test_operator, reprx1, channel),
# yieldRepr(test_operator, reprx2, channel),
# yieldRepr(test_operator, repry1, channel),
# yieldRepr(test_operator, repry2, channel)))
# readline()
# end
max(yieldRepr(test_operator, reprx1, channel),
yieldRepr(test_operator, reprx2, channel),
yieldRepr(test_operator, repry1, channel),
yieldRepr(test_operator, repry2, channel))
end
compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_Topo_DC, channel::DimensionalChannel{T,2}) where {T} = begin
reprx1 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_L, size(channel)..., _ReprMin)
reprx2 = enum_acc_repr2D(test_operator, w, RelationGlob, IA_Li, size(channel)..., _ReprMin)
repry1 = enum_acc_repr2D(test_operator, w, IA_L, Virtual_Enlarge, size(channel)..., _ReprMin)
repry2 = enum_acc_repr2D(test_operator, w, IA_Li, Virtual_Enlarge, size(channel)..., _ReprMin)
min(yieldRepr(test_operator, reprx1, channel),
yieldRepr(test_operator, reprx2, channel),
yieldRepr(test_operator, repry1, channel),
yieldRepr(test_operator, repry2, channel))
end
# EC: Just optimize the values on the outer boundary
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_EC, channel::DimensionalChannel{T,2}) where {T} = begin
X,Y = size(channel)
reprs = [
((w.x.x-1 >= 1) ? [Interval2D(Interval(w.x.x-1,w.x.x),enlargeInterval(w.y,Y))] : Interval2D[])...,
((w.x.y+1 <= X+1) ? [Interval2D(Interval(w.x.y,w.x.y+1),enlargeInterval(w.y,Y))] : Interval2D[])...,
((w.y.x-1 >= 1) ? [Interval2D(enlargeInterval(w.x,X),Interval(w.y.x-1,w.y.x))] : Interval2D[])...,
((w.y.y+1 <= Y+1) ? [Interval2D(enlargeInterval(w.x,X),Interval(w.y.y,w.y.y+1))] : Interval2D[])...,
]
extr = map(w->yieldReprs(test_operator, _ReprMax(w), channel), reprs)
maxExtrema(extr)
end
compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_EC, channel::DimensionalChannel{T,2}) where {T} = begin
X,Y = size(channel)
reprs = [
((w.x.x-1 >= 1) ? [Interval2D(Interval(w.x.x-1,w.x.x),enlargeInterval(w.y,Y))] : Interval2D[])...,
((w.x.y+1 <= X+1) ? [Interval2D(Interval(w.x.y,w.x.y+1),enlargeInterval(w.y,Y))] : Interval2D[])...,
((w.y.x-1 >= 1) ? [Interval2D(enlargeInterval(w.x,X),Interval(w.y.x-1,w.y.x))] : Interval2D[])...,
((w.y.y+1 <= Y+1) ? [Interval2D(enlargeInterval(w.x,X),Interval(w.y.y,w.y.y+1))] : Interval2D[])...,
]
extr = map(w->yieldRepr(test_operator, _ReprMax(w), channel), reprs)
maximum([extr..., typemin(T)])
end
compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_Topo_EC, channel::DimensionalChannel{T,2}) where {T} = begin
X,Y = size(channel)
reprs = [
((w.x.x-1 >= 1) ? [Interval2D(Interval(w.x.x-1,w.x.x),enlargeInterval(w.y,Y))] : Interval2D[])...,
((w.x.y+1 <= X+1) ? [Interval2D(Interval(w.x.y,w.x.y+1),enlargeInterval(w.y,Y))] : Interval2D[])...,
((w.y.x-1 >= 1) ? [Interval2D(enlargeInterval(w.x,X),Interval(w.y.x-1,w.y.x))] : Interval2D[])...,
((w.y.y+1 <= Y+1) ? [Interval2D(enlargeInterval(w.x,X),Interval(w.y.y,w.y.y+1))] : Interval2D[])...,
]
extr = map(w->yieldRepr(test_operator, _ReprMin(w), channel), reprs)
minimum([extr..., typemax(T)])
end
# PO: For each pair crossing the border, perform a minimization step and then a maximization step
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_PO, channel::DimensionalChannel{T,2}) where {T} = begin
# if true &&
# # (channel == [1620 1408 1343; 1724 1398 1252; 1177 1703 1367] && w.x.x==1 && w.x.y==3 && w.y.x==3 && w.y.y==4) ||
# # (channel == [412 489 559 619 784; 795 771 1317 854 1256; 971 874 878 1278 560] && w.x.x==1 && w.x.y==3 && w.y.x==3 && w.y.y==4)
# (channel == [2405 2205 1898 1620 1383; 1922 1555 1383 1393 1492; 1382 1340 1434 1640 1704] && w.x.x==1 && w.x.y==3 && w.y.x==3 && w.y.y==4)
# x_singleton = ! (w.x.x < w.x.y-1)
# y_singleton = ! (w.y.x < w.y.y-1)
# if x_singleton && y_singleton
# println(typemin(T),typemax(T))
# else
# rx1,rx2 = x_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
# ry1,ry2 = y_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
# println(rx1)
# println(rx2)
# println(ry1)
# println(ry2)
# # reprx1 = enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprMin)
# # reprx2 = enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprMin)
# # repry1 = enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprMin)
# # repry2 = enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprMin)
# # println(reprx1)
# # println(reprx2)
# # println(repry1)
# # println(repry2)
# println(
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprFake), channel, 2)
# )
# println(
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprFake), channel, 2)
# )
# println(
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprFake), channel, 1)
# )
# println(
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprFake), channel, 1)
# )
# println(
# maxExtrema((
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprFake), channel, 2),
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprFake), channel, 2),
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprFake), channel, 1),
# yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprFake), channel, 1),
# ))
# )
# # println(computeModalThresholdDual(test_operator, w, RectangleRelation(rx1 , RelationId), channel))
# # println(computeModalThresholdDual(test_operator, w, RectangleRelation(rx2 , RelationId), channel))
# # println(computeModalThresholdDual(test_operator, w, RectangleRelation(RelationId , ry1), channel))
# # println(computeModalThresholdDual(test_operator, w, RectangleRelation(RelationId , ry2), channel))
# # println(maxExtrema((
# # computeModalThresholdDual(test_operator, w, RectangleRelation(rx1 , RelationId), channel),
# # computeModalThresholdDual(test_operator, w, RectangleRelation(rx2 , RelationId), channel),
# # computeModalThresholdDual(test_operator, w, RectangleRelation(RelationId , ry1), channel),
# # computeModalThresholdDual(test_operator, w, RectangleRelation(RelationId , ry2), channel),
# # ))
# # )
# end
# readline()
# end
x_singleton = ! (w.x.x < w.x.y-1)
y_singleton = ! (w.y.x < w.y.y-1)
if x_singleton && y_singleton
return typemin(T),typemax(T)
end
rx1,rx2 = x_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
ry1,ry2 = y_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
# reprx1 = enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprFake)
# reprx2 = enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprFake)
# repry1 = enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprFake)
# repry2 = enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprFake)
maxExtrema(
yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprFake), channel, 2),
yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprFake), channel, 2),
yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprFake), channel, 1),
yieldMinMaxCombinations(test_operator, enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprFake), channel, 1),
)
end
compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_PO, channel::DimensionalChannel{T,2}) where {T} = begin
# if channel == [1620 1408 1343; 1724 1398 1252; 1177 1703 1367] && w.x.x==1 && w.x.y==3 && w.y.x==3 && w.y.y==4
# println(! (w.x.x < w.x.y-1) && ! (w.y.x < w.y.y-1))
# println(max(
# computeModalThresholdDual(test_operator, w, RectangleRelation(RelationId , IA_O), channel),
# computeModalThresholdDual(test_operator, w, RectangleRelation(RelationId , IA_Oi), channel),
# computeModalThresholdDual(test_operator, w, RectangleRelation(IA_Oi , RelationId), channel),
# computeModalThresholdDual(test_operator, w, RectangleRelation(IA_O , RelationId), channel),
# ))
# readline()
# end
x_singleton = ! (w.x.x < w.x.y-1)
y_singleton = ! (w.y.x < w.y.y-1)
if x_singleton && y_singleton
return typemin(T)
end
rx1,rx2 = x_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
ry1,ry2 = y_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
max(
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprFake), channel, 2),
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprFake), channel, 2),
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprFake), channel, 1),
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprFake), channel, 1),
)
end
compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_Topo_PO, channel::DimensionalChannel{T,2}) where {T} = begin
x_singleton = ! (w.x.x < w.x.y-1)
y_singleton = ! (w.y.x < w.y.y-1)
if x_singleton && y_singleton
return typemax(T)
end
rx1,rx2 = x_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
ry1,ry2 = y_singleton ? (IA_Bi,IA_Ei) : (IA_O,IA_Oi)
min(
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry1, size(channel)..., _ReprFake), channel, 2),
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, RelationId, ry2, size(channel)..., _ReprFake), channel, 2),
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, rx1, RelationId, size(channel)..., _ReprFake), channel, 1),
yieldMinMaxCombination(test_operator, enum_acc_repr2D(test_operator, w, rx2, RelationId, size(channel)..., _ReprFake), channel, 1),
)
end
# TPP: Just optimize the values on the inner boundary
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_TPP, channel::DimensionalChannel{T,2}) where {T} = begin
reprs = if (w.x.x < w.x.y-1) && (w.y.x < w.y.y-1)
[Interval2D(Interval(w.x.x,w.x.x+1),w.y), Interval2D(Interval(w.x.y-1,w.x.y),w.y), Interval2D(w.x,Interval(w.y.x,w.y.x+1)), Interval2D(w.x,Interval(w.y.y-1,w.y.y))]
elseif (w.x.x < w.x.y-1) || (w.y.x < w.y.y-1)
[w]
else Interval2D[]
end
extr = map(w->yieldReprs(test_operator, _ReprMax(w), channel), reprs)
maxExtrema(extr)
end
compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_TPP, channel::DimensionalChannel{T,2}) where {T} = begin
reprs = if (w.x.x < w.x.y-1) && (w.y.x < w.y.y-1)
[Interval2D(Interval(w.x.x,w.x.x+1),w.y), Interval2D(Interval(w.x.y-1,w.x.y),w.y), Interval2D(w.x,Interval(w.y.x,w.y.x+1)), Interval2D(w.x,Interval(w.y.y-1,w.y.y))]
elseif (w.x.x < w.x.y-1) || (w.y.x < w.y.y-1)
[w]
else Interval2D[]
end
extr = map(w->yieldRepr(test_operator, _ReprMax(w), channel), reprs)
maximum([extr..., typemin(T)])
end
compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_Topo_TPP, channel::DimensionalChannel{T,2}) where {T} = begin
reprs = if (w.x.x < w.x.y-1) && (w.y.x < w.y.y-1)
[Interval2D(Interval(w.x.x,w.x.x+1),w.y), Interval2D(Interval(w.x.y-1,w.x.y),w.y), Interval2D(w.x,Interval(w.y.x,w.y.x+1)), Interval2D(w.x,Interval(w.y.y-1,w.y.y))]
elseif (w.x.x < w.x.y-1) || (w.y.x < w.y.y-1)
[w]
else Interval2D[]
end
extr = map(w->yieldRepr(test_operator, _ReprMin(w), channel), reprs)
minimum([extr..., typemax(T)])
end
# TPPi: check 4 possible extensions of the box and perform a minimize+maximize step
computeModalThresholdDual(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_TPPi, channel::DimensionalChannel{T,2}) where {T} = begin
X,Y = size(channel)
reprs = [
((w.x.x-1 >= 1) ? [Interval2D(Interval(w.x.x-1,w.x.y),w.y)] : Interval2D[])...,
((w.x.y+1 <= X+1) ? [Interval2D(Interval(w.x.x,w.x.y+1),w.y)] : Interval2D[])...,
((w.y.x-1 >= 1) ? [Interval2D(w.x,Interval(w.y.x-1,w.y.y))] : Interval2D[])...,
((w.y.y+1 <= Y+1) ? [Interval2D(w.x,Interval(w.y.x,w.y.y+1))] : Interval2D[])...,
]
extr = map(w->yieldReprs(test_operator, _ReprMin(w), channel), reprs)
maxExtrema(extr)
end
compute_modal_gamma(test_operator::CanonicalFeatureGeq, w::Interval2D, r::_Topo_TPPi, channel::DimensionalChannel{T,2}) where {T} = begin
X,Y = size(channel)
reprs = [
((w.x.x-1 >= 1) ? [Interval2D(Interval(w.x.x-1,w.x.y),w.y)] : Interval2D[])...,
((w.x.y+1 <= X+1) ? [Interval2D(Interval(w.x.x,w.x.y+1),w.y)] : Interval2D[])...,
((w.y.x-1 >= 1) ? [Interval2D(w.x,Interval(w.y.x-1,w.y.y))] : Interval2D[])...,
((w.y.y+1 <= Y+1) ? [Interval2D(w.x,Interval(w.y.x,w.y.y+1))] : Interval2D[])...,
]
extr = map(w->yieldRepr(test_operator, _ReprMin(w), channel), reprs)
maximum([extr..., typemin(T)])
end
compute_modal_gamma(test_operator::CanonicalFeatureLeq, w::Interval2D, r::_Topo_TPPi, channel::DimensionalChannel{T,2}) where {T} = begin
X,Y = size(channel)
reprs = [
((w.x.x-1 >= 1) ? [Interval2D(Interval(w.x.x-1,w.x.y),w.y)] : Interval2D[])...,
((w.x.y+1 <= X+1) ? [Interval2D(Interval(w.x.x,w.x.y+1),w.y)] : Interval2D[])...,
((w.y.x-1 >= 1) ? [Interval2D(w.x,Interval(w.y.x-1,w.y.y))] : Interval2D[])...,
((w.y.y+1 <= Y+1) ? [Interval2D(w.x,Interval(w.y.x,w.y.y+1))] : Interval2D[])...,
]
extr = map(w->yieldRepr(test_operator, _ReprMax(w), channel), reprs)
minimum([extr..., typemax(T)])
end
enum_acc_repr(test_operator::TestOperator, w::Interval2D, ::_Topo_NTPP, X::Integer, Y::Integer) = enum_acc_repr(test_operator, w, RectangleRelation(IA_D,IA_D), X, Y)
enum_acc_repr(test_operator::TestOperator, w::Interval2D, ::_Topo_NTPPi, X::Integer, Y::Integer) = enum_acc_repr(test_operator, w, RectangleRelation(IA_Di,IA_Di), X, Y)
=#
#=
# To test optimizations
fn1 = ModalLogic.enum_acc_repr
fn2 = ModalLogic.enum_acc_repr2
rel = ModalLogic.Topo_EC
X = 4
Y = 3
while(true)
a = randn(4,4);
wextr = (x)->ModalLogic.computePropositionalThresholdDual([canonical_geq, canonical_leq], x,a);
# TODO try all rectangles, avoid randominzing like this... Also try all channel sizes
x1 = rand(1:X);
x2 = x1+rand(1:(X+1-x1));
x3 = rand(1:Y);
x4 = x3+rand(1:(Y+1-x3));
for i in 1:X
println(a[i,:]);
end
println(x1,",",x2);
println(x3,",",x4);
println(a[x1:x2-1,x3:x4-1]);
print("[")
print(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> (y)->map((x)->ModalLogic.print_world(x),y));
println("]")
print("[")
print(fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> (y)->map((x)->ModalLogic.print_world(x),y));
println("]")
println(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr);
println(fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr);
(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr) == (fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr) || break;
end
fn1 = ModalLogic.enum_acc_repr
fn2 = ModalLogic.enum_acc_repr2
rel = ModalLogic.Topo_EC
a = [253 670 577; 569 730 931; 633 850 679];
X,Y = size(a)
while(true)
wextr = (x)->ModalLogic.computePropositionalThresholdDual([canonical_geq, canonical_leq], x,a);
# TODO try all rectangles, avoid randominzing like this... Also try all channel sizes
x1 = rand(1:X);
x2 = x1+rand(1:(X+1-x1));
x3 = rand(1:Y);
x4 = x3+rand(1:(Y+1-x3));
for i in 1:X
println(a[i,:]);
end
println(x1,",",x2);
println(x3,",",x4);
println(a[x1:x2-1,x3:x4-1]);
print("[")
print(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> (y)->map((x)->ModalLogic.print_world(x),y));
println("]")
print("[")
print(fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> (y)->map((x)->ModalLogic.print_world(x),y));
println("]")
println(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr);
println(fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr);
(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr) == (fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr) || break;
end
fn1 = ModalLogic.enum_acc_repr
fn2 = ModalLogic.enum_acc_repr2
rel = ModalLogic.Topo_EC
a = [253 670 577; 569 730 931; 633 850 679];
X,Y = size(a)
while(true)
wextr = (x)->ModalLogic.computePropositionalThresholdDual([canonical_geq, canonical_leq], x,a);
# TODO try all rectangles, avoid randominzing like this... Also try all channel sizes
x1 = 2
x2 = 3
x3 = 2
x4 = 3
for i in 1:X
println(a[i,:]);
end
println(x1,",",x2);
println(x3,",",x4);
println(a[x1:x2-1,x3:x4-1]);
print("[")
print(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> (y)->map((x)->ModalLogic.print_world(x),y));
println("]")
print("[")
print(fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> (y)->map((x)->ModalLogic.print_world(x),y));
println("]")
println(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr);
println(fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr);
(fn1(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr) == (fn2(ModalLogic.Interval2D((x1,x2),(x3,x4)), rel, size(a)...) |> wextr) || break;
end
=#
################################################################################
# END 2D Topological relations
################################################################################
# const DimensionalUniDataset{T<:Number,UD} = AbstractArray{T,UD}# getUniChannel(ud::DimensionalUniDataset{T,1}, idx::Integer) where T = @views ud[idx] # N=0
# getUniChannel(ud::DimensionalUniDataset{T,2}, idx::Integer) where T = @views ud[:, idx] # N=1
# getUniChannel(ud::DimensionalUniDataset{T,3}, idx::Integer) where T = @views ud[:, :, idx] # N=2
# Initialize DimensionalUniDataset by slicing across the attribute dimension
# DimensionalUniDataset(::UndefInitializer, d::DimensionalDataset{T,2}) where T = Array{T,1}(undef, nsamples(d))::DimensionalUniDataset{T,1}
# DimensionalUniDataset(::UndefInitializer, d::DimensionalDataset{T,3}) where T = Array{T,2}(undef, size(d)[1:end-1])::DimensionalUniDataset{T,2}
# DimensionalUniDataset(::UndefInitializer, d::DimensionalDataset{T,4}) where T = Array{T,3}(undef, size(d)[1:end-1])::DimensionalUniDataset{T,3}
# get_channel(d::DimensionalDataset{T,2}, idx_i::Integer, idx_a::Integer) where T = @views d[ idx_a, idx_i]::T # N=0
# get_channel(d::DimensionalDataset{T,3}, idx_i::Integer, idx_a::Integer) where T = @views d[:, idx_a, idx_i]::DimensionalChannel{T,1} # N=1
# get_channel(d::DimensionalDataset{T,4}, idx_i::Integer, idx_a::Integer) where T = @views d[:, :, idx_a, idx_i]::DimensionalChannel{T,2} # N=2
# channel_size(d::DimensionalDataset{T,2}, idx_i::Integer) where T = size(d[ 1, idx_i])
# channel_size(d::DimensionalDataset{T,3}, idx_i::Integer) where T = size(d[:, 1, idx_i])
# channel_size(d::DimensionalDataset{T,4}, idx_i::Integer) where T = size(d[:, :, 1, idx_i])
# channel_size(d::DimensionalDataset{T,D}, idx_i::Integer) where {T,D} = size(d[idx_i])[1:end-2]
# @computed get_channel(X::InterpretedModalDataset{T,N}, idxs::AbstractVector{Integer}, attribute::Integer) where T = X[idxs, attribute, fill(:, N)...]::AbstractArray{T,N-1}
# # get_channel(X::InterpretedModalDataset, args...) = get_channel(X.domain, args...)
# # get_channel(X::MultiFrameModalDataset, i_frame::Integer, idx_i::Integer, idx_f::Integer, args...) = get_channel(X.frames[i_frame], idx_i, idx_f, args...)
#
#
# TODO maybe using views can improve performances
# attributeview(X::DimensionalDataset{T,2}, idxs::AbstractVector{Integer}, attribute::Integer) = d[idxs, attribute]
# attributeview(X::DimensionalDataset{T,3}, idxs::AbstractVector{Integer}, attribute::Integer) = view(d, idxs, attribute, :)
# attributeview(X::DimensionalDataset{T,4}, idxs::AbstractVector{Integer}, attribute::Integer) = view(d, idxs, attribute, :, :)
# strip_domain(d::DimensionalDataset{T,2}) where T = d # N=0
# strip_domain(d::DimensionalDataset{T,3}) where T = dropdims(d; dims=1) # N=1
# strip_domain(d::DimensionalDataset{T,4}) where T = dropdims(d; dims=(1,2)) # N=2
# function prepare_featsnaggrs(grouped_featsnops::AbstractVector{<:AbstractVector{<:TestOperatorFun}})
# # Pairs of feature ids + set of aggregators
# grouped_featsnaggrs = Vector{<:Aggregator}[
# ModalLogic.existential_aggregator.(test_operators) for (i_feature, test_operators) in enumerate(grouped_featsnops)
# ]
# # grouped_featsnaggrs = [grouped_featsnaggrs[i_feature] for i_feature in 1:length(features)]
# # # Flatten dictionary, and enhance aggregators in dictionary with their relative indices
# # flattened_featsnaggrs = Tuple{<:AbstractFeature,<:Aggregator}[]
# # i_featsnaggr = 1
# # for (i_feature, aggregators) in enumerate(grouped_featsnaggrs)
# # for aggregator in aggregators
# # push!(flattened_featsnaggrs, (features[i_feature],aggregator))
# # i_featsnaggr+=1
# # end
# # end
# grouped_featsnaggrs
# end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1520 | using OpenML
using SoleData
using DataFrames
"""
    load_japanesevowels() -> (X, Y)

Download the JapaneseVowels dataset from OpenML (dataset id 375) and reshape it
into a per-utterance multivariate time-series dataset.

Returns a `DataFrame` `X` with one row per utterance, where each cepstrum
coefficient column holds a vector (the time series, capped to a uniform
length), and a vector `Y` with the speaker label of each utterance.

Note: requires network access (downloads via `OpenML.load`).
"""
function load_japanesevowels()
    # Load JapaneseVowels https://www.openml.org/search?type=data&status=active&id=375
    X = DataFrame(OpenML.load(375))
    # Reconstruct the recording-session ("take") id: the `frame` counter
    # restarts from 1.0 at the beginning of each new take.
    take_col = Int[]  # concretely-typed; an untyped [] would yield Vector{Any}
    i_take = 1
    prev_frame = nothing
    for row in eachrow(X)
        cur_frame = Float64(row.frame)
        if !isnothing(prev_frame) && cur_frame == 1.0
            i_take += 1
        end
        prev_frame = cur_frame
        push!(take_col, i_take)
    end
    X[:,:take] = take_col
    # Collapse frame-level rows into one row per (speaker, take, utterance);
    # every remaining column becomes a vector (a time series) via Ref.
    X = combine(DataFrames.groupby(X, [:speaker, :take, :utterance]), Not([:speaker, :take, :utterance, :frame]) .=> Ref; renamecols=false)
    Y = X[:,:speaker]
    # Force uniform size across instances by capping every series to the
    # shortest series length found in the dataset.
    minimum_n_points = minimum(collect(Iterators.flatten(eachrow(length.(X[:,Not([:speaker, :take, :utterance])])))))
    new_X = (x->x[1:minimum_n_points]).(X[:,Not([:speaker, :take, :utterance])])
    return new_X, Y
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 4906 | using Revise
using ModalDecisionTrees
using SoleData.DimensionalDatasets
using Random
using BenchmarkTools
# Debugging script: checks type stability / inference of the internal
# ModalDecisionTrees fitting pipeline (fit_tree, _fit_tree, split_node!)
# via @code_warntype and @inferred, for String, Int64 and Float64 labels.
rng = MersenneTwister(1)
# 10 instances total, split into two halves of 5 (one per class).
_ninstances, _ninstances_h = 10, 5
n_vars = 2
n_feats = n_vars*2
n_pts = 5
using SoleModels
using SoleData: VariableMin, VariableMax
# Build (feature, test-operator) pairs: min with ≥ and max with < for each variable.
features = []
featsnops = []
for i_var in 1:n_vars
push!(features, VariableMin(i_var))
push!(featsnops, [≥])
push!(features, VariableMax(i_var))
push!(featsnops, [<])
end
# Single-modality logiset over random time-series data, with one-step memoization.
Xs = MultiLogiset([
scalarlogiset(randn(n_pts, n_vars, _ninstances),
features,
conditions = featsnops,
relations = [IARelations...],
onestep_precompute_relmemoset = true,
onestep_precompute_globmemoset = true,
)
]);
W = ModalDecisionTrees.default_weights(_ninstances)
# Common keyword arguments forwarded to the tree-fitting routines below.
kwargs = (;
loss_function = ModalDecisionTrees.entropy,
max_depth = typemax(Int),
min_samples_leaf = 4,
min_purity_increase = -Inf,
max_purity_at_leaf = 0.2,
n_subrelations = Function[identity],
n_subfeatures = Int64[n_feats],
allow_global_splits = [true],
use_minification = false,
)
perform_consistency_check = true
# @code_warntype ninstances(Xs)
initconditions = [ModalDecisionTrees.start_without_world]
################################################################################
# fit
################################################################################
# Classification with String labels.
Y = String[fill("0", _ninstances_h)..., fill("1", _ninstances_h)...]
ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
@code_warntype ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
@inferred ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
# Classification with Int64 labels.
Y = Int64[fill(3, _ninstances_h)..., fill(1, _ninstances_h)...]
ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
@code_warntype ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
@inferred ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
# Regression with Float64 labels.
Y = Float64[fill(0.0, _ninstances_h)..., fill(1.0, _ninstances_h)...]
ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
@code_warntype ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
@inferred ModalDecisionTrees.fit_tree(Xs, Y, initconditions, W; perform_consistency_check = perform_consistency_check, kwargs...)
################################################################################
# _fit
################################################################################
# Same checks on the internal _fit_tree entry point (classification branch).
Y = Int64[fill(1, _ninstances_h)..., fill(2, _ninstances_h)...]
ModalDecisionTrees._fit_tree(Xs, Y, initconditions, W;
n_classes = 2,
_is_classification = Val(true),
_perform_consistency_check = Val(perform_consistency_check), kwargs...)
@code_warntype ModalDecisionTrees._fit_tree(Xs, Y, initconditions, W;
n_classes = 2,
_is_classification = Val(true),
_perform_consistency_check = Val(perform_consistency_check), kwargs...)
# Regression branch of _fit_tree.
Y = Float64[fill(0.0, _ninstances_h)..., fill(1.0, _ninstances_h)...]
ModalDecisionTrees._fit_tree(Xs, Y, initconditions, W;
n_classes = 0,
_is_classification = Val(false),
_perform_consistency_check = Val(perform_consistency_check), kwargs...)
@code_warntype ModalDecisionTrees._fit_tree(Xs, Y, initconditions, W;
n_classes = 0,
_is_classification = Val(false),
_perform_consistency_check = Val(perform_consistency_check), kwargs...)
################################################################################
# split_node!
################################################################################
# Inference check on a single split of the root node.
Y = Int64[fill(1, _ninstances_h)..., fill(2, _ninstances_h)...]
idxs = collect(1:_ninstances)
Ss = ModalDecisionTrees.initialworldsets(Xs, initconditions)
onlyallowglobal = [(initcond == ModalDecisionTrees.start_without_world) for initcond in initconditions]
node = ModalDecisionTrees.NodeMeta{Float64,Int64}(1:_ninstances, 0, 0, onlyallowglobal)
@code_warntype ModalDecisionTrees.split_node!(node, Xs, Ss, Y, initconditions, W;
idxs = idxs,
rng = rng,
n_classes = 2,
_is_classification = Val(true),
_perform_consistency_check = Val(perform_consistency_check),
kwargs...,
)
# https://docs.julialang.org/en/v1/manual/performance-tips/#Be-aware-of-when-Julia-avoids-specializing
# @which f(...)).specializations
# TODO regression case
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3014 | using Test
using SoleData
using SoleModels
using ModalDecisionTrees
using ModalDecisionTrees: DTLeaf, prediction
using ModalDecisionTrees: DTInternal, decision
# Creation of decision leaves, nodes, decision trees, forests
# Construct a leaf from a label
# @test DTLeaf(1) == DTLeaf{Int64}(1, Int64[])
# @test DTLeaf{Int64}(1) == DTLeaf{Int64}(1, Int64[])
# @test DTLeaf("Class_1") == DTLeaf{String}("Class_1", String[])
# @test DTLeaf{String}("Class_1") == DTLeaf{String}("Class_1", String[])
# Construct a leaf from a label & supporting labels
# @test DTLeaf(1, []) == DTLeaf{Int64}(1, Int64[])
# @test DTLeaf{Int64}(1, [1.0]) == DTLeaf{Int64}(1, Int64[1])
# Unit tests for decision-tree building blocks: DTLeaf construction/coercion,
# label inference from supporting labels, and DTInternal/DTree/DForest assembly.
@test repr( DTLeaf(1.0, [1.0])) == repr(DTLeaf{Float64}(1.0, [1.0]))
@test_nowarn DTLeaf{Float32}(1, [1])
@test_nowarn DTLeaf{Float32}(1.0, [1.5])
# Label and supporting labels must share a common type.
@test_throws MethodError DTLeaf(1, ["Class1"])
@test_throws InexactError DTLeaf(1, [1.5])
@test_nowarn DTLeaf{String}("1.0", ["0.5", "1.5"])
# Inferring the label from supporting labels
@test prediction(DTLeaf{String}(["Class_1", "Class_1", "Class_2"])) == "Class_1"
@test_nowarn DTLeaf(["1.5"])
@test_throws MethodError DTLeaf([1.0,"Class_1"])
# Check robustness
@test_nowarn DTLeaf{Int64}(1, 1:10)
@test_nowarn DTLeaf{Int64}(1, 1.0:10.0)
@test_nowarn DTLeaf{Float32}(1, 1:10)
# @test prediction(DTLeaf(1:10)) == 5
# Regression leaves aggregate supporting labels by averaging.
@test prediction(DTLeaf{Float64}(1:10)) == 5.5
@test prediction(DTLeaf{Float32}(1:10)) == 5.5f0
@test prediction(DTLeaf{Float64}(1:11)) == 6
# Check edge parity case (aggregation biased towards the first class)
@test prediction(DTLeaf{String}(["Class_1", "Class_2"])) == "Class_1"
@test prediction(DTLeaf(["Class_1", "Class_2"])) == "Class_1"
# TODO test NSDT Leaves
# Decision internal node (DTInternal) + Decision Tree & Forest (DTree & DForest)
# Existential decision: ∃ world where min(V1) ≥ 10, w.r.t. the global relation.
formula = SoleData.ScalarExistentialFormula(SoleData.globalrel, VariableMin(1), >=, 10)
_decision = RestrictedDecision(formula)
reg_leaf, cls_leaf = DTLeaf([1.0,2.0]), DTLeaf([1,2])
# create node
cls_node = @test_nowarn DTInternal(2, _decision, cls_leaf, cls_leaf, cls_leaf)
# composite node
cls_node = @test_nowarn DTInternal(2, _decision, cls_leaf, cls_leaf, cls_leaf)
cls_node = DTInternal(2, _decision, cls_leaf, cls_leaf, cls_leaf)
# Note: modality is required
@test_throws MethodError DTInternal(_decision, cls_leaf, cls_leaf, cls_leaf)
@test_throws MethodError DTInternal(_decision, reg_leaf, reg_leaf, reg_leaf)
@test_throws MethodError DTInternal(_decision, cls_node, cls_leaf)
# create node without local _decision
# cls_node = @test_nowarn DTInternal(2, _decision, cls_leaf, cls_leaf)
@test_logs (:warn,) DTInternal(2, _decision, cls_leaf, cls_leaf)
cls_node = DTInternal(2, _decision, cls_leaf, cls_leaf)
# Mixed tree
# Mixing regression and classification leaves under one node must fail.
@test_throws AssertionError DTInternal(2, _decision, reg_leaf, cls_leaf)
cls_tree = @test_nowarn DTree(cls_node, [ModalDecisionTrees.Interval], [ModalDecisionTrees.start_without_world])
cls_forest = @test_nowarn DForest([cls_tree, cls_tree, cls_tree])
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 12358 | using ModalDecisionTrees
using MLJ
using DataFrames
using SoleModels
using SoleData
using Logging
using SoleLogics
using Random
using Test
# Integration tests: fitting ModalDecisionTree through the MLJ machine interface
# on static (tabular), time-series, and image-shaped DataFrame columns, plus
# direct build_tree calls on explicitly-constructed logisets.
N = 5
# Binary labels: first ceil(N/2)+? instances are `true`.
y = [i <= div(N,2)+1 for i in 1:N]
# Split dataset
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
# Helper: size of a column element, or `missing` when `size` is undefined for it.
_size = ((x)->(hasmethod(size, (typeof(x),)) ? size(x) : missing))
# Static table with missing values and a Char column: fit must be rejected.
X_static = DataFrame(
ID = 1:N,
a = randn(N),
b = [-2.0, 1.0, 2.0, missing, 3.0],
c = [1, 2, 3, 4, 5],
d = [0, 1, 0, 1, 0],
e = ['M', 'F', missing, 'M', 'F'],
)
_size.(X_static)
@test_throws AssertionError MLJ.fit!(machine(ModalDecisionTree(;), X_static, y), rows=train_idxs)
X_static = DataFrame(
ID = 1:N,
# a = randn(N),
b = [-2.0, -1.0, 2.0, 2.0, 3.0],
c = [1, 2, 3, 4, 5],
d = [0, 1, 0, 1, 0],
)
_size.(X_static)
@test_throws AssertionError MLJ.fit!(machine(ModalDecisionTree(;), X_static, y), rows=train_idxs)
# With Float64-coerced columns fitting succeeds; depth depends on stopping criteria.
mach = MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 2), Float64.(X_static[:,Not(:ID)]), y), rows=train_idxs)
@test depth(fitted_params(mach).tree) == 0
mach = MLJ.fit!(machine(ModalDecisionTree(; min_purity_increase=-Inf, min_samples_leaf = 1), Float64.(X_static[:,Not(:ID)]), y), rows=train_idxs)
@test depth(fitted_params(mach).tree) > 0
# Time-series columns (vectors of uniform length).
X_multi1 = DataFrame(
ID = 1:N,
t1 = [randn(2), randn(2), randn(2), randn(2), randn(2)], # good
t2 = [randn(2), randn(2), randn(2), randn(2), randn(2)], # good
)
_size.(X_multi1)
MLJ.fit!(machine(ModalDecisionTree(;), X_multi1, y), rows=train_idxs)
# Time-series columns with non-uniform lengths across instances.
X_multi2 = DataFrame(
ID = 1:N,
t3 = [randn(2), randn(2), randn(2), randn(2), randn(2)], # good
twrong1 = [randn(2), randn(2), randn(5), randn(2), randn(4)], # good but actually TODO
)
_size.(X_multi2)
MLJ.fit!(machine(ModalDecisionTree(;), X_multi2, y), rows=train_idxs)
# Image-shaped (matrix) columns.
X_images1 = DataFrame(
ID = 1:N,
R1 = [randn(2,2), randn(2,2), randn(2,2), randn(2,2), randn(2,2)], # good
)
_size.(X_images1)
MLJ.fit!(machine(ModalDecisionTree(;), X_images1, y), rows=train_idxs)
X_images1 = DataFrame(
ID = 1:N,
R1 = [randn(2,3), randn(2,3), randn(2,3), randn(2,3), randn(2,3)], # good
)
_size.(X_images1)
# Direct build_tree on an explicitly-built logiset (global relation only).
logiset = scalarlogiset(X_images1[:,Not(:ID)]; use_onestep_memoization=true, conditions = [
ScalarMetaCondition(VariableMax(1), ≥),
ScalarMetaCondition(VariableMax(1), <),
ScalarMetaCondition(VariableMin(1), ≥),
ScalarMetaCondition(VariableMin(1), <),
], relations = [globalrel])
ModalDecisionTrees.build_tree(logiset, y)
ModalDecisionTrees.build_tree(logiset, y;
max_depth = nothing,
min_samples_leaf = ModalDecisionTrees.BOTTOM_MIN_SAMPLES_LEAF,
min_purity_increase = ModalDecisionTrees.BOTTOM_MIN_PURITY_INCREASE,
max_purity_at_leaf = ModalDecisionTrees.BOTTOM_MAX_PURITY_AT_LEAF,
)
ModalDecisionTrees.build_tree(MultiLogiset(logiset), y)
# Same fit via the dataset-wrapping utility + raw keyword set.
multilogiset, _ = ModalDecisionTrees.wrapdataset(X_images1[:,Not(:ID)], ModalDecisionTree(; min_samples_leaf = 1))
kwargs = (loss_function = nothing, max_depth = nothing, min_samples_leaf = 1, min_purity_increase = 0.002, max_purity_at_leaf = Inf, max_modal_depth = nothing, n_subrelations = identity, n_subfeatures = identity, initconditions = ModalDecisionTrees.StartAtCenter(), allow_global_splits = true, use_minification = false, perform_consistency_check = false, rng = Random.GLOBAL_RNG, print_progress = false)
ModalDecisionTrees.build_tree(multilogiset, y;
kwargs...
)
MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 1, relations = (d)->[globalrel]), X_images1[:,Not(:ID)], y), rows=train_idxs, verbosity=2)
MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 1, relations = (d)->[globalrel]), X_images1[:,Not(:ID)], y), verbosity=2)
# An empty relation set with a non-global init condition must fail.
@test_throws CompositeException MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 1, initconditions = :start_at_center, relations = (d)->SoleLogics.AbstractRelation[]), X_images1[:,Not(:ID)], y), verbosity=2)
# Multi-channel image columns of mixed shapes.
X_images1 = DataFrame(
ID = 1:N,
R1 = [randn(2,3), randn(2,3), randn(2,3), randn(2,3), randn(2,3)], # good
G1 = [randn(3,3), randn(3,3), randn(3,3), randn(3,3), randn(3,3)], # good
B1 = [randn(3,3), randn(3,3), randn(3,3), randn(3,3), randn(3,3)], # good
)
_size.(X_images1)
MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 2), X_images1[:,Not(:ID)], y), rows=train_idxs)
X_images2 = DataFrame(
ID = 1:N,
R2 = [ones(5,5), ones(5,5), ones(5,5), zeros(5,5), zeros(5,5)], # good
G2 = [randn(5,5), randn(5,5), randn(5,5), randn(5,5), randn(5,5)], # good
B2 = [randn(5,5), randn(5,5), randn(5,5), randn(5,5), randn(5,5)], # good
)
_size.(X_images2)
# Join all modalities (static + series + images) on ID and fit multimodally.
X_all = innerjoin([Float64.(X_static), X_multi1, X_multi2, X_images1, X_images2]... , on = :ID)[:, Not(:ID)]
_size.(X_all)
MLJ.fit!(machine(ModalDecisionTree(;), X_all, y), rows=train_idxs)
X_all = innerjoin([X_multi1, X_images2]... , on = :ID)[:, Not(:ID)]
mach = MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 1), X_all, y), rows=train_idxs)
multilogiset, var_grouping = ModalDecisionTrees.wrapdataset(X_all, ModalDecisionTree(; min_samples_leaf = 1))
ModalDecisionTrees.build_tree(multilogiset, y;
kwargs...
)
############################################################################################
############################################################################################
############################################################################################
# Multimodal tree:
# One column per dimensionality (scalar / vector / matrix), each discriminative
# for a different instance.
X_all = DataFrame(
mode0 = [1.0, 0.0, 0.0, 0.0, 0.0],
mode1 = [zeros(5), ones(5), zeros(5), zeros(5), zeros(5)],
mode2 = [zeros(5,5), zeros(5,5), ones(5,5), zeros(5,5), zeros(5,5)],
)
mach = MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 1), X_all, y), rows=train_idxs)
report(mach).printmodel(1000; threshold_digits = 2);
printmodel.(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true));
printmodel.(joinrules(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true)));
model = ModalDecisionTree(min_purity_increase = 0.001)
# Fitting each modality alone must not emit errors.
@test_logs min_level=Logging.Error machine(model, X_multi1, y) |> fit!
@test_logs min_level=Logging.Error machine(model, X_multi2, y) |> fit!
@test_logs min_level=Logging.Error machine(model, X_images1, y) |> fit!
@test_logs min_level=Logging.Error machine(model, X_images2, y) |> fit!
machine(model, X_all, y) |> fit!
# @test_throws AssertionError machine(model, X_all, y) |> fit!
############################################################################################
############################################################################################
############################################################################################
using MultiData
using SoleData
using ModalDecisionTrees
using MLJ
using DataFrames
using SoleModels
using Random
using Test
using Logging
# Tests on MultiDataset inputs and on rule extraction: verifies that the rule
# sets extracted from a fitted tree (both long-form and short-form antecedents)
# are mutually exclusive, collectively exhaustive, and reproduce the tree's
# predictions instance by instance.
N = 5
# Multimodal tree:
_X_all = DataFrame(
mode0 = [1.0, 0.0, 0.0, 0.0, 0.0],
mode1 = [zeros(5), ones(5), zeros(5), zeros(5), zeros(5)],
mode2 = [zeros(5,5), zeros(5,5), ones(5,5), zeros(5,5), zeros(5,5)],
)
X_all = MultiDataset(_X_all)
y = [i <= div(N,2)+1 for i in 1:N]
# Split dataset
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
# Wrapping and fitting a MultiDataset must not emit errors.
@test_logs min_level=Logging.Error wrapdataset(X_all, ModalDecisionTree(;))
@test_logs min_level=Logging.Error mach = MLJ.fit!(machine(ModalDecisionTree(; min_samples_leaf = 1), X_all, y), rows=train_idxs)
# Very multimodal tree:
N = 100
# Multimodal tree:
# X_all = DataFrame(
# mode0 = [min(rand(), 1/i) for i in 1:N],
# mode1 = [max.(rand(5), 1/i) for i in 1:N],
# mode2 = [begin
# a = zeros(5,5)
# idx = rand(1:ceil(Int, 4*(i/N)))
# a[idx:1+idx,2:3] = max.(rand(2, 2), 1/i)
# end for i in 1:N],
# )
# Larger random dataset, one column per dimensionality.
X_all = DataFrame(
mode0 = [rand() for i in 1:N],
mode1 = [rand(5) for i in 1:N],
mode2 = [rand(5,5) for i in 1:N],
)
X_all = MultiDataset(X_all)
y = [i <= div(N,2)+1 for i in 1:N]
# Split dataset
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
multilogiset, var_grouping = ModalDecisionTrees.wrapdataset(X_all, ModalDecisionTree(; min_samples_leaf = 1))
# Fully-grown tree (no pruning) so every instance reaches a pure leaf.
mach = MLJ.fit!(machine(ModalDecisionTree(; max_purity_at_leaf = Inf, min_samples_leaf = 1, min_purity_increase = -Inf), X_all, y), rows=train_idxs)
preds = string.(predict_mode(mach, X_all))
report(mach).model
report(mach).solemodel
printmodel(report(mach).solemodel; show_shortforms = true)
# Extract the rule set twice: with full path formulas and with short forms.
longform_ruleset = (listrules(report(mach).model; use_shortforms=false));
shortform_ruleset = (listrules(report(mach).model; use_shortforms=true));
longform_ruleset .|> antecedent .|> x->syntaxstring(x; threshold_digits = 2) .|> println;
shortform_ruleset .|> antecedent .|> x->syntaxstring(x; threshold_digits = 2) .|> println;
as = (longform_ruleset .|> antecedent);
as = as .|> (x->normalize(x; allow_atom_flipping=true, prefer_implications = true))
bs = (shortform_ruleset .|> antecedent);
bs = bs .|> (x->normalize(x; allow_atom_flipping=true, prefer_implications = true))
# (as[2], bs[2]) .|> x->syntaxstring(x; threshold_digits = 2) .|> println
# (as[13], bs[13]) .|> x->syntaxstring(x; threshold_digits = 2) .|> println
# as .|> x->syntaxstring(x; threshold_digits = 2)
# bs .|> x->syntaxstring(x; threshold_digits = 2)
# @test isequal(as, bs)
# @test all(((x,y),)->isequal(x,y), collect(zip((longform_ruleset .|> antecedent .|> x->normalize(x; allow_atom_flipping=true)), (shortform_ruleset .|> antecedent .|> x->normalize(x; allow_atom_flipping=true)))))
# Longform set is mutually exclusive & collectively exhaustive
# m1[i, r]: prediction of rule r on instance i, or nothing when r doesn't fire.
longform_y_per_rule = [SoleModels.apply(r, multilogiset) for r in longform_ruleset]
m1 = hcat(longform_y_per_rule...)
@test all(r->count(!isnothing, r) >= 1, eachrow(m1));
@test all(r->count(!isnothing, r) < 2, eachrow(m1));
@test all(r->count(!isnothing, r) == 1, eachrow(m1));
# Path formula CORRECTNESS! Very very important!!
map(s->filter(!isnothing, s), eachrow(m1))
longform_y = map(s->filter(!isnothing, s)[1], eachrow(m1))
@test preds == longform_y
# Shortform set is mutually exclusive & collectively exhaustive
shortform_y_per_rule = [SoleModels.apply(r, multilogiset) for r in shortform_ruleset]
m2 = hcat(shortform_y_per_rule...)
@test all(r->count(!isnothing, r) >= 1, eachrow(m2));
@test all(r->count(!isnothing, r) < 2, eachrow(m2));
@test all(r->count(!isnothing, r) == 1, eachrow(m2));
# Path formula CORRECTNESS! Very very important!!
map(s->filter(!isnothing, s), eachrow(m2))
shortform_y = map(s->filter(!isnothing, s)[1], eachrow(m2))
@test shortform_y == preds
# More consistency
# Per-instance application must agree with the per-rule application above.
_shortform_y_per_rule = [map(r->SoleModels.apply(r, multilogiset, i_instance), shortform_ruleset) for i_instance in 1:ninstances(multilogiset)]
for j in 1:size(m1, 1)
for i in 1:size(m1, 2)
@test m2[j,i] == hcat(_shortform_y_per_rule...)[i,j]
end
end
@test eachcol(hcat(_shortform_y_per_rule...)) == eachrow(hcat(shortform_y_per_rule...))
# More consistency
_longform_y_per_rule = [map(r->SoleModels.apply(r, multilogiset, i_instance), longform_ruleset) for i_instance in 1:ninstances(multilogiset)]
for j in 1:size(m1, 1)
for i in 1:size(m1, 2)
@test m1[j,i] == hcat(_longform_y_per_rule...)[i,j]
end
end
@test eachcol(hcat(_longform_y_per_rule...)) == eachrow(hcat(longform_y_per_rule...))
@test longform_y_per_rule == shortform_y_per_rule
@test _longform_y_per_rule == _shortform_y_per_rule
# filter.(!isnothing, eachrow(hcat(longform_y_per_rule...)))
# # filter.(!isnothing, eachcol(hcat(longform_y_per_rule...)))
# filter.(!isnothing, eachrow(hcat(shortform_y_per_rule...)))
# # filter.(!isnothing, eachcol(hcat(shortform_y_per_rule...)))
# Printing variants of the extracted/joined rules must work.
printmodel.(listrules(report(mach).model; use_shortforms=true));
printmodel.(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true));
printmodel.(joinrules(longform_ruleset); show_metrics = true);
printmodel.(joinrules(shortform_ruleset); show_metrics = true);
printmodel.(joinrules(listrules(report(mach).model)));
@test_nowarn printmodel.(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true))
@test_nowarn printmodel.(joinrules(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true)))
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2586 | using Pkg
# Pkg.activate(".")
# Pkg.add("Revise")
# Pkg.add("MLJ")
# Pkg.add("MLJBase")
# Pkg.add(url = "https://github.com/aclai-lab/ModalDecisionTrees.jl", rev = "dev-v0.8")
# Pkg.add("ScientificTypes")
# Pkg.add("DataFrames")
# Pkg.add("Tables")
# Pkg.add("ARFFFiles#main")
using Revise
using ModalDecisionTrees
using MLJ
using MLJBase
using ScientificTypes
using DataFrames
using Tables
using StatsBase
using MLJModelInterface
# Demo script: fit a ModalDecisionTree on the NATOPS ARFF dataset via the raw
# MLJModelInterface (MMI) API and render the trained/test trees to LaTeX.
MMI = MLJModelInterface
MDT = ModalDecisionTrees
include("utils.jl")
model = ModalDecisionTree(;
min_samples_leaf = 4,
min_purity_increase = 0.002,
)
using ARFFFiles, DataFrames
dataset_name = "NATOPS"
# dataset_name = "RacketSports"
# dataset_name = "Libras"
X, y = SoleModels.load_arff_dataset(dataset_name)
# NOTE(review): `Y` is undefined here — only `X, y` are bound above; this line
# presumably should read `y` (or a train split). Verify before running.
fitresult = MMI.fit(model, 0, X, Y);
# NOTE(review): `X_test`/`Y_test` are never defined in this script — likely a
# train/test split was removed; confirm against the original demo.
Y_test_preds, test_tree = MMI.predict(model, fitresult[1], X_test, Y_test);
tree = fitresult[1].rawmodel
fitresult[3].print_tree()
fitresult[3].print_tree(test_tree)
println(tree)
println(test_tree)
# MLJ.ConfusionMatrix()(Y_test_preds, Y_test);
# SoleModels.ConfusionMatrix(Y_test_preds, Y_test)
# tree = fitresult.rawmodel
# println(tree)
# println(test_tree)
# fitreport.print_tree()
# fitreport.print_tree(test_tree)
# using AbstractTrees
# using GraphRecipes
# using Plots
# default(size=(1000, 1000))
# plot(TreePlot(tree.root), method=:tree, fontsize=10)
# show_latex(tree, "train")
# show_latex(test_tree, "test")
# Render trees to LaTeX with human-readable NATOPS variable names (see utils.jl).
show_latex(tree, "train", [variable_names_latex])
show_latex(test_tree, "test", [variable_names_latex])
# function apply_static_descriptor(X::DataFrame, f::Function)
# variable_names = names(X)
# rename(f.(X), Dict([Symbol(a) => Symbol("$(f)($(a))") for a in variable_names]))
# end
# function apply_static_descriptor(X::DataFrame, fs::AbstractVector{<:Function})
# hcat([apply_static_descriptor(X, f) for f in fs]...)
# end
# for fs in [mean, var, minimum, maximum, [minimum, maximum], [mean, minimum, maximum], [var, mean, minimum, maximum]]
# X_static_train = apply_static_descriptor(X_train, fs)
# X_static_test = apply_static_descriptor(X_test, fs)
# fitresult = MMI.fit(model, 0, X_static_train, Y_train);
# Y_test_preds, test_tree = MMI.predict(model, fitresult[1], X_static_test, Y_test);
# tree = fitresult[1].rawmodel
# fitresult[3].print_tree()
# fitresult[3].print_tree(test_tree)
# # println(tree)
# # println(test_tree)
# # MLJ.ConfusionMatrix()(Y_test_preds, Y_test)
# println(fs)
# println(SoleModels.ConfusionMatrix(Y_test_preds, Y_test))
# readline()
# end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1688 | using Test
using ModalDecisionTrees
using MLJ
using MLJBase
using SoleModels
using SoleData
using SoleData.DimensionalDatasets
using DataFrames
using Random
using CategoricalArrays
using StatsBase
using StatsBase: mean
using ModalDecisionTrees: build_stump, build_tree, build_forest
# For MLDatasets
ENV["DATADEPS_ALWAYS_ACCEPT"] = true
# Pkg.update()
println("Julia version: ", VERSION)
# Include every test file in `list` in order, printing a 50-char `#` banner
# before the batch and a 50-char `=` separator after each file.
function run_tests(list)
    println("\n" * ("#" ^ 50))
    foreach(list) do testfile
        println("TEST: $testfile")
        include(testfile)
        println("=" ^ 50)
    end
end
# Registry of test suites: (suite name, list of test files relative to test/).
test_suites = [
    ("Base", ["base.jl"]),
    ("Classification, modal", [
        "classification/japanesevowels.jl",
        "classification/digits.jl",
        "classification/mnist.jl",
        # "classification/demo-juliacon2022.jl",
    ]),
    ("Classification", [
        "classification/iris.jl",
        "classification/iris-params.jl",
    ]),
    ("Regression", [
        "regression/simple.jl",
        # "regression/ames.jl",
        "regression/digits-regression.jl",
        # "regression/random.jl",
    ]),
    ("Miscellaneous", [
        "multimodal-datasets-multiformulas-construction.jl",
    ]),
    ("Other", [
        "other/parse-and-translate-restricted.jl",
        "other/restricted2complete.jl",
        "other/translate-complete.jl",
    ]),
    ("Pluto Demo", ["$(dirname(dirname(pathof(ModalDecisionTrees))))/pluto-demo.jl", ]),
]

# Run each suite inside its own named testset under the top-level one.
@testset "ModalDecisionTrees.jl" begin
    for (name, list) in test_suites
        @testset "$name" begin
            run_tests(list)
        end
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5563 | using MLJ
using ModalDecisionTrees
using MLDatasets
using SoleData
using SoleModels
using Test
# Load the CIFAR-10 test split, supporting both MLDatasets API generations.
# Bug fix: the version probe checked `MNIST` (copy-pasted from an MNIST
# script) while the dataset actually loaded is CIFAR10; probe CIFAR10 itself.
Xcube, y = begin
    if CIFAR10 isa Base.Callable # MLDatasets ≥ v0.7: dataset types are callable
        CIFAR10(:test)[:]
    else # MLDatasets v0.5: module-style accessor API
        CIFAR10.testdata()
    end
end
# CIFAR-10 labels are 0-based integers; map them to class-name strings.
class_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
y = map(_y-> class_names[_y+1], y)
N = length(y)
# NOTE(review): `p` (rows 1:n_test) is used as the TRAIN split and `p_test` as
# the TEST split below — the `n_test`/`n_train` names look swapped; confirm.
n_test = 1000
n_train = 1000
p = 1:n_test
p_test = n_test .+ (1:n_train)
############################################################################################
############################################################################################
############################################################################################
# Turn the H×W×C×N image cube into a DataFrame with one column per channel.
X = SoleData.cube2dataframe(Xcube, ["R", "G", "B"])
X_train, y_train = X[p,:], y[p]
X_test, y_test = X[p_test,:], y[p_test]
# Spatial modal tree over RCC8 topological relations, images downsized to 10×10.
model = ModalDecisionTree(;
relations = :RCC8,
conditions = [minimum],
# initconditions = :start_at_center,
featvaltype = Float32,
downsize = (10,10), # (x)->ModalDecisionTrees.MLJInterface.moving_average(x, (10,10))
# conditions = [minimum, maximum, UnivariateFeature{Float64}(recheight), UnivariateFeature{Float64}(recwidth)],
# conditions = [minimum, maximum, UnivariateFeature{Float32}(1, recheight), UnivariateFeature{Float32}(1, recwidth)],
print_progress = true,
)
mach = machine(model, X_train, y_train) |> fit!
report(mach).printmodel(1000; threshold_digits = 2);
printmodel(report(mach).model; show_metrics = true);
printmodel.(listrules(report(mach).model); show_metrics = true);
# Accuracy thresholds: modest bar must pass; the higher bar is known-broken.
yhat_test = MLJ.predict_mode(mach, X_test)
@test MLJ.accuracy(y_test, yhat_test) > 0.15
@test_broken MLJ.accuracy(y_test, yhat_test) > 0.5
# `sprinkle` re-applies the tree on test data, returning predictions + the
# tree annotated with test-instance statistics.
yhat_test2, tree2 = report(mach).sprinkle(X_test, y_test);
@test yhat_test2 == yhat_test
soletree2 = ModalDecisionTrees.translate(tree2)
printmodel(soletree2; show_metrics = true);
printmodel.(listrules(soletree2); show_metrics = true);
SoleModels.info.(listrules(soletree2), :supporting_labels);
leaves = consequent.(listrules(soletree2))
SoleModels.readmetrics.(leaves)
zip(SoleModels.readmetrics.(leaves),leaves) |> collect |> sort
@test MLJ.accuracy(y_test, yhat_test) > 0.4
############################################################################################
############################################################################################
############################################################################################
# using Images
# using ImageFiltering
# using StatsBase
# Xcube
# # img = eachslice(Xcube; dims=4)[1]
# Xcubergb = mapslices(c->RGB(c...), Xcube, dims=3)
# Xcubehsv = HSV.(Xcubergb)
# # Xcubergb = mapslices(c->(@show c), Xcubehsv, dims=3)
# Xcubehsv = mapslices(c->[first(c).h, first(c).s, first(c).v], Xcubehsv, dims=3)
# # Xcubergb = mapslices(c->[c.h, c.s, c.v], Xcubehsv, dims=[1,2,4])
# X = SoleData.cube2dataframe(Xcube, ["H", "S", "V"])
# X_train, y_train = X[p,:], y[p]
# X_test, y_test = X[p_test,:], y[p_test]
# kernel = [1 0 -1;
# 2 0 -2;
# 1 0 -1]
# im = imfilter(rand(10,10), kernel)
# im = imfilter(rand(2,2), kernel)
# recvedge(x) = (imfilter(x, [1;; -1]))
# rechedge(x) = (imfilter(x, [1;; -1]'))
# recvsobel(x) = (imfilter(x, [1 0 -1; 2 0 -2; 1 0 -1]))
# rechsobel(x) = (imfilter(x, [1 0 -1; 2 0 -2; 1 0 -1]'))
# vedge(x) = StatsBase.mean(recvedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(recvedge(x))
# hedge(x) = StatsBase.mean(rechedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(rechedge(x))
# vsobel(x) = StatsBase.mean(recvsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(recvsobel(x))
# hsobel(x) = StatsBase.mean(rechsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(rechsobel(x))
# svedge(x) = StatsBase.sum(recvedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(recvedge(x))
# shedge(x) = StatsBase.sum(rechedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(rechedge(x))
# svsobel(x) = StatsBase.sum(recvsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(recvsobel(x))
# shsobel(x) = StatsBase.sum(rechsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(rechsobel(x))
# model = ModalDecisionTree(;
# relations = :RCC8,
# min_samples_leaf = 8,
# conditions = [svsobel, shsobel],
# # initconditions = :start_at_center,
# initconditions = :start_with_global,
# featvaltype = Float32,
# downsize = (8,8),
# # conditions = [minimum, maximum, UnivariateFeature{Float64}(recheight), UnivariateFeature{Float64}(recwidth)],
# # conditions = [minimum, maximum, UnivariateFeature{Float32}(1, recheight), UnivariateFeature{Float32}(1, recwidth)],
# print_progress = true,
# )
# mach = machine(model, X_train, y_train) |> fit!
# report(mach).printmodel(1000; threshold_digits = 2);
# printmodel(report(mach).model; show_metrics = true);
# printmodel.(listrules(report(mach).model); show_metrics = true);
# yhat_test = MLJ.predict_mode(mach, X_test)
# MLJ.accuracy(y_test, yhat_test)
# @test MLJ.accuracy(y_test, yhat_test) > 0.15
# @test_broken MLJ.accuracy(y_test, yhat_test) > 0.5
# model = ModalDecisionTree(;
# relations = :RCC8,
# conditions = [svedge, shedge],
# # initconditions = :start_at_center,
# featvaltype = Float32,
# downsize = (5,5),
# # conditions = [minimum, maximum, UnivariateFeature{Float64}(recheight), UnivariateFeature{Float64}(recwidth)],
# # conditions = [minimum, maximum, UnivariateFeature{Float32}(1, recheight), UnivariateFeature{Float32}(1, recwidth)],
# print_progress = true,
# )
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 10273 |
using Tables
using DataFrames
using StatsBase
import MLJModelInterface: fit
# LaTeX display names for the 24 NATOPS motion-capture variables:
# X/Y/Z coordinates of hand tip, elbow, wrist, and thumb, for the
# Left (^L) and Right (^R) side, in that order (matching column order).
variable_names_latex = [
"\\text{hand tip}_X^L",
"\\text{hand tip}_Y^L",
"\\text{hand tip}_Z^L",
"\\text{hand tip}_X^R",
"\\text{hand tip}_Y^R",
"\\text{hand tip}_Z^R",
"\\text{elbow}_X^L",
"\\text{elbow}_Y^L",
"\\text{elbow}_Z^L",
"\\text{elbow}_X^R",
"\\text{elbow}_Y^R",
"\\text{elbow}_Z^R",
"\\text{wrist}_X^L",
"\\text{wrist}_Y^L",
"\\text{wrist}_Z^L",
"\\text{wrist}_X^R",
"\\text{wrist}_Y^R",
"\\text{wrist}_Z^R",
"\\text{thumb}_X^L",
"\\text{thumb}_Y^L",
"\\text{thumb}_Z^L",
"\\text{thumb}_X^R",
"\\text{thumb}_Y^R",
"\\text{thumb}_Z^R",
]
"""
    show_latex(tree; file_suffix = "", variable_names = nothing, silent = true)

Render `tree` as LaTeX via `save_tree_latex` (loaded from
`results/utils/print-tree-to-latex.jl`), compile the resulting `.tex` file
with `pdflatex` inside the `latex/` subdirectory, and open the PDF with
`evince`.

# Keywords
- `file_suffix`: appended to the output file name (`tree-<suffix>.tex`).
- `variable_names`: variable-name map forwarded to the LaTeX tree printer.
- `silent`: when `true` (default), discard `pdflatex`'s console output.
"""
function show_latex(tree; file_suffix = "", variable_names = nothing, silent = true)
    # NOTE: `include` at function scope still evaluates in the enclosing
    # module's global scope; this (re)loads `save_tree_latex` on every call.
    include("../results/utils/print-tree-to-latex.jl")

    savedir = "latex"

    # Map prediction labels to colored LaTeX boxes.
    additional_dict = Dict{String, String}(
        "predictionI have command" => "\\fcolorbox{black}{pastel1}{\\ \\ I have command\\ \\ }",
        "predictionAll clear" => "\\fcolorbox{black}{pastel2}{\\ \\ All clear\\ \\ }",
        "predictionNot clear" => "\\fcolorbox{black}{pastel3}{\\ \\ Not clear\\ \\ }",
        "predictionSpread wings" => "\\fcolorbox{black}{pastel4}{\\ \\ Spread wings\\ \\ }",
        "predictionFold wings" => "\\fcolorbox{black}{pastel5}{\\ \\ Fold wings\\ \\ }",
        "predictionLock wings" => "\\fcolorbox{black}{pastel6}{\\ \\ Lock wings\\ \\ }",
    )
    # Rendering options forwarded to `save_tree_latex`.
    common_kwargs = (
        conversion_dict = additional_dict,
        # threshold_scale_factor = 3,
        threshold_show_decimals = 2,
        hide_modality_ids = true,
        variable_names_map = variable_names,
        # replace_dict = Dict([
        #     # "\\{1\\}" => "",
        #     # "{1}" => "",
        #     "NN" => "N",
        # ]),
        scale = 1.,
        height = ["25em", "25em", "20em", "22em", "22em", "22em"],
        decisions_at_nodes = false,
        edges_textsize = [:Large, :small, :Large, :small, :small, :small],
        tree_names = ["t_minmax_static", "t_minmax_temporal", "t_neuro_static", "t_minmax_neuro_temporal", "t_minmax_neuro_static", "t_neuro_temporal"],
        latex_preamble = """
        \\definecolor{pastel1}{RGB}{161, 201, 244}
        \\definecolor{pastel2}{RGB}{255, 180, 130}
        \\definecolor{pastel3}{RGB}{141, 229, 161}
        \\definecolor{pastel4}{RGB}{255, 159, 155}
        \\definecolor{pastel5}{RGB}{208, 187, 255}
        \\definecolor{pastel6}{RGB}{222, 187, 155}
        \\definecolor{pastel7}{RGB}{250, 176, 228}
        \\definecolor{pastel8}{RGB}{207, 207, 207}
        \\definecolor{pastel9}{RGB}{255, 254, 163}
        \\definecolor{pastel10}{RGB}{185, 242, 240}
        """,
        # space_unit = (2.2, 3.9)./.2,
        space_unit = (2.2, 3.9)./.75,
        # min_n_inst =
    )

    main_tex_file = "tree$(file_suffix == "" ? "" : "-$(file_suffix)").tex"

    save_tree_latex(
        [tree],
        savedir;
        main_tex_file = main_tex_file,
        common_kwargs...,
    )

    # Compile and open inside `savedir`. BUGFIX: the do-block form of `cd`
    # restores the previous working directory even when `pdflatex`/`evince`
    # throws; the original `cd(savedir) ... cd("..")` left the process
    # stranded inside `savedir` on error.
    cd(savedir) do
        if !silent
            run(`pdflatex $(main_tex_file)`)
        else
            # run(`bash -c "echo 2"`);
            # run(`bash -c "echo 2 2\\\>\\&1 \\\> /dev/null"`);
            run(`bash -c "pdflatex $(main_tex_file) 2\\\>\\&1 \\\> /dev/null"`)
        end
        pdf_name = replace(main_tex_file, ".tex" => ".pdf")
        run(`evince $pdf_name`)
    end
end
############################################################################################
############################################################################################
############################################################################################
using LinearAlgebra
using StatsBase
using SoleBase: Label, RLabel, CLabel
"""
    ConfusionMatrix{T<:Number}

Multi-class confusion matrix together with derived classification metrics
(overall accuracy, Cohen's kappa, and per-class accuracy, F1, sensitivity,
specificity, PPV, NPV).

Rows of `matrix` index the *actual* class and columns the *predicted* class
(the label-based constructor accumulates `matrix[act, pred] += w`).
"""
struct ConfusionMatrix{T<:Number}
    ########################################################################################
    # Class labels (one per row/column) and the raw, possibly weighted, counts.
    class_names::Vector
    matrix::Matrix{T}
    ########################################################################################
    # Derived metrics, computed once at construction time.
    overall_accuracy::Float64
    kappa::Float64
    # Mean of the per-class one-vs-rest accuracies (not the overall accuracy).
    mean_accuracy::Float64
    accuracies::Vector{Float64}
    F1s::Vector{Float64}
    sensitivities::Vector{Float64}
    specificities::Vector{Float64}
    PPVs::Vector{Float64}
    NPVs::Vector{Float64}
    ########################################################################################
    # Convenience constructor: classes are named by their 1-based index.
    function ConfusionMatrix(matrix::AbstractMatrix)
        ConfusionMatrix(Symbol.(1:size(matrix, 1)), matrix)
    end
    # Build from a precomputed square matrix plus class names, deriving all metrics.
    # NOTE(review): the first assertion message has an unbalanced "(" — cosmetic.
    function ConfusionMatrix(
        class_names::Vector,
        matrix::AbstractMatrix{T},
    ) where {T<:Number}
        @assert size(matrix,1) == size(matrix,2) "Cannot instantiate ConfusionMatrix with matrix of size ($(size(matrix))"
        n_classes = size(matrix,1)
        @assert length(class_names) == n_classes "Cannot instantiate ConfusionMatrix with mismatching n_classes ($(n_classes)) and class_names $(class_names)"
        ALL = sum(matrix)               # total (weighted) number of instances
        TR = LinearAlgebra.tr(matrix)   # correctly classified mass (diagonal)
        F = ALL-TR                      # misclassified mass (currently unused)
        overall_accuracy = TR / ALL
        # Chance agreement sum_i(rowsum_i * colsum_i) / ALL^2, for Cohen's kappa.
        prob_chance = (sum(matrix,dims=1) * sum(matrix,dims=2))[1] / ALL^2
        kappa = (overall_accuracy - prob_chance) / (1.0 - prob_chance)
        ####################################################################################
        # Per-class one-vs-rest counts.
        TPs = Vector{Float64}(undef, n_classes)
        TNs = Vector{Float64}(undef, n_classes)
        FPs = Vector{Float64}(undef, n_classes)
        FNs = Vector{Float64}(undef, n_classes)
        for i in 1:n_classes
            class = i
            other_classes = [(1:i-1)..., (i+1:n_classes)...]
            TPs[i] = sum(matrix[class,class])
            TNs[i] = sum(matrix[other_classes,other_classes])
            FNs[i] = sum(matrix[class,other_classes])
            FPs[i] = sum(matrix[other_classes,class])
        end
        ####################################################################################
        # https://en.wikipedia.org/wiki/Accuracy_and_precision#In_binary_classification
        accuracies = (TPs .+ TNs)./ALL
        mean_accuracy = StatsBase.mean(accuracies)
        # https://en.wikipedia.org/wiki/F-score
        F1s = TPs./(TPs.+.5*(FPs.+FNs))
        # https://en.wikipedia.org/wiki/Sensitivity_and_specificity
        sensitivities = TPs./(TPs.+FNs)
        specificities = TNs./(TNs.+FPs)
        PPVs = TPs./(TPs.+FPs)
        NPVs = TNs./(TNs.+FNs)
        new{T}(class_names,
            matrix,
            overall_accuracy,
            kappa,
            mean_accuracy,
            accuracies,
            F1s,
            sensitivities,
            specificities,
            PPVs,
            NPVs,
        )
    end
    # Build from actual/predicted label vectors, with optional per-instance
    # weights and an optional forced class ordering.
    function ConfusionMatrix(
        actual::AbstractVector{L},
        predicted::AbstractVector{L},
        weights::Union{Nothing,AbstractVector{Z}} = nothing;
        force_class_order = nothing,
    ) where {L<:CLabel,Z}
        @assert length(actual) == length(predicted) "Cannot compute ConfusionMatrix with mismatching number of actual $(length(actual)) and predicted $(length(predicted)) labels."
        if isnothing(weights)
            weights = default_weights(actual)
        end
        @assert length(actual) == length(weights) "Cannot compute ConfusionMatrix with mismatching number of actual $(length(actual)) and weights $(length(weights)) labels."
        class_labels = begin
            class_labels = unique([actual; predicted])
            if isnothing(force_class_order)
                class_labels = sort(class_labels, lt=SoleBase.nat_sort)
            else
                @assert length(setdiff(force_class_order, class_labels)) == 0
                class_labels = force_class_order
            end
            # Binary case: retain order of classes YES/NO
            # NOTE(review): this branch actually *reverses* a [YES*, NO*]
            # ordering, and `startswith` assumes string-like labels — confirm
            # the intended order and behavior for Symbol labels.
            if length(class_labels) == 2 &&
                startswith(class_labels[1], "YES") &&
                startswith(class_labels[2], "NO")
                class_labels = reverse(class_labels)
            end
            class_labels
        end
        _ninstances = length(actual)
        # Map labels to integer indices for fast accumulation.
        _actual = zeros(Int, _ninstances)
        _predicted = zeros(Int, _ninstances)
        n_classes = length(class_labels)
        for i in 1:n_classes
            _actual[actual .== class_labels[i]] .= i
            _predicted[predicted .== class_labels[i]] .= i
        end
        # Accumulate (weighted) counts: rows = actual, columns = predicted.
        matrix = zeros(eltype(weights),n_classes,n_classes)
        for (act,pred,w) in zip(_actual, _predicted, weights)
            matrix[act,pred] += w
        end
        ConfusionMatrix(class_labels, matrix)
    end
end
"""
    overall_accuracy(cm::ConfusionMatrix)

Return the overall (micro-averaged) accuracy stored in `cm`.
"""
function overall_accuracy(cm::ConfusionMatrix)
    return cm.overall_accuracy
end

"""
    kappa(cm::ConfusionMatrix)

Return Cohen's kappa coefficient stored in `cm`.
"""
function kappa(cm::ConfusionMatrix)
    return cm.kappa
end

"""
    class_counts(cm::ConfusionMatrix)

Return the per-class instance counts (row sums of the confusion matrix,
i.e. counts by *actual* class).
"""
function class_counts(cm::ConfusionMatrix)
    return sum(cm.matrix, dims=2)
end
"""
    Base.show(io::IO, cm::ConfusionMatrix)

Pretty-print `cm`: the raw matrix (one row per actual class, suffixed with
that class's sensitivity and name), followed by the overall metrics
(accuracy, kappa) and the per-class metric vectors.
"""
function Base.show(io::IO, cm::ConfusionMatrix)
    # Column width: widest printed cell, plus one space of padding.
    max_num_digits = maximum(length(string(val)) for val in cm.matrix)
    println(io, "Confusion Matrix ($(length(cm.class_names)) classes):")
    for (row, class_name, sensitivity) in zip(eachrow(cm.matrix), cm.class_names, cm.sensitivities)
        for val in row
            print(io, lpad(val, max_num_digits+1, " "))
        end
        println(io, "\t\t\t$(round(100*sensitivity, digits=2))%\t\t$(class_name)")
    end
    ############################################################################
    println(io, "accuracy =\t\t$(round(overall_accuracy(cm), digits=4))")
    println(io, "κ =\t\t\t$(round(cm.kappa, digits=4))")
    ############################################################################
    println(io, "sensitivities:\t\t$(round.(cm.sensitivities, digits=4))")
    println(io, "specificities:\t\t$(round.(cm.specificities, digits=4))")
    println(io, "PPVs:\t\t\t$(round.(cm.PPVs, digits=4))")
    println(io, "NPVs:\t\t\t$(round.(cm.NPVs, digits=4))")
    print(io, "F1s:\t\t\t$(round.(cm.F1s, digits=4))")
    # BUGFIX: this line previously printed `cm.mean_accuracy` under the
    # "mean_F1" label; report the actual mean of the per-class F1 scores.
    println(io, "\tmean_F1:\t$(round(sum(cm.F1s) / length(cm.F1s), digits=4))")
    print(io, "accuracies:\t\t$(round.(cm.accuracies, digits=4))")
    println(io, "\tmean_accuracy:\t$(round(cm.mean_accuracy, digits=4))")
end
############################################################################################
############################################################################################
############################################################################################
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2099 | @testset "demo-juliacon2022.jl" begin
    # Body of `@testset "demo-juliacon2022.jl"`: end-to-end NATOPS demo
    # (load official split, fit a weighted ModalDecisionTree, check accuracy).
    # Import ModalDecisionTrees.jl & MLJ
    using ModalDecisionTrees
    using MLJ

    include("demo-juliacon2022-utils.jl");

    ################################################################################

    # Obtain dataset (official) split
    dataset_train, dataset_test = load_arff_dataset("NATOPS");

    # Unpack split
    X_train, y_train = dataset_train;
    X_test, y_test = dataset_test;

    # X_train[1,:"Elbow left, X coordinate"] = begin x = X_train[1,:"Elbow left, X coordinate"]; x[1] = NaN; x end
    # X_train[:,:"Elbow left, X coordinatex"] = [
    #     begin x = Vector{Union{Float64,Missing}}(y); x[1] = missing; x end
    #     for y in X_train[:,:"Elbow left, X coordinate"]]
    # names(X_train[:,[end]])
    # X_train = moving_average.(X_train, 10, 10)
    # X_train = ((x)->x[1:3]).(X_train)
    # X_test = moving_average.(X_test, 10, 10)
    # X_test = ((x)->x[1:3]).(X_test)
    # X_train[:,:new] = [randn(2,2) for i in 1:nrow(X_train)]
    # X_test[:, :new] = [randn(2,2) for i in 1:nrow(X_test)]
    # X_train = X_train[:,[1,end]]
    # X_test = X_test[:,[1, end]]

    # Random positive per-instance weights for the fit.
    w = abs.(randn(nrow(X_train)))

    # Instantiate model with standard pruning conditions
    model = ModalDecisionTree()
    # model = ModalDecisionTree(; relations = :RCC8)

    ################################################################################

    # Train model & ring a bell :D
    @time mach = machine(model, X_train, y_train, w) |> fit!
    # run(`paplay /usr/share/sounds/freedesktop/stereo/complete.oga`);

    # Print model
    # NOTE(review): other scripts use `report(mach).printmodel()`; confirm the
    # `mach.report` accessor is the intended one here.
    mach.report.printmodel()

    # Test on the hold-out set &
    # inspect the distribution of test instances across the leaves
    y_test_preds = MLJ.predict(mach, X_test);
    # predict(args...) = MLJ.predict(ModalDecisionTree(), args...);
    # y_test_preds, test_tree = MLJ.predict(mach, X_test, y_test);

    # Inspect confusion matrix
    cm = ConfusionMatrix(y_test, y_test_preds; force_class_order=["I have command", "All clear", "Not clear", "Spread wings", "Fold wings", "Lock wings",]);
    @test overall_accuracy(cm) > 0.6

    # Render model in LaTeX
    # show_latex(mach.fitresult.rawmodel; variable_names = [variable_names_latex], silent = true);
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 8931 | using Random
# === Digits (8×8 images) tests: trees/forests on cube and NamedTuple data. ===
using ModalDecisionTrees
using MLJBase

include("$(dirname(dirname(pathof(ModalDecisionTrees))))/test/data/load.jl")

_X, _y = load_digits()

# Reshape each flat 64-vector into an 8×8×1 image; stack instances on dim 4.
Xcube = cat(map(r->reshape(r, (8,8,1)), eachrow(_X))...; dims=4)

Xcube_small = Xcube
# 2×2 sum-pooling: downsize each 8×8 image to 4×4.
Xcube_small = mapslices(x->[
    sum(x[1:2, 1:2]) sum(x[1:2, 3:4]) sum(x[1:2, 5:6]) sum(x[1:2, 7:8]);
    sum(x[3:4, 1:2]) sum(x[3:4, 3:4]) sum(x[3:4, 5:6]) sum(x[3:4, 7:8]);
    sum(x[5:6, 1:2]) sum(x[5:6, 3:4]) sum(x[5:6, 5:6]) sum(x[5:6, 7:8]);
    sum(x[7:8, 1:2]) sum(x[7:8, 3:4]) sum(x[7:8, 5:6]) sum(x[7:8, 7:8]);
], Xcube; dims = [1,2])

# One NamedTuple entry per channel (dim 3), each a vector of per-instance slices.
Xnt = NamedTuple(zip(Symbol.(1:length(eachslice(Xcube_small; dims=3))), eachslice.(eachslice(Xcube_small; dims=3); dims=3)))
X = Xnt

# Labels as strings "0".."9".
y = string.(_y.-1)

N = length(y)

# Fixed-seed 10%/90% train/test split.
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.1)], p[round(Int, N*.1)+1:end]

############################################################################################

# # Full training TODO too costly
# mach = @time machine(ModalDecisionTree(;), X, y) |> fit!
# @show nnodes(fitted_params(mach).rawmodel) # @test nnodes(fitted_params(mach).rawmodel) == 191
# @show sum(predict_mode(mach, X) .== y) / length(y) # @test sum(predict_mode(mach, X) .== y) / length(y) > 0.92

############################################################################################

# Default decision tree, fit on the train rows only.
mach = @time machine(ModalDecisionTree(;), X, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 57
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.43

# Tree with explicit pruning conditions.
mach = @time machine(ModalDecisionTree(;
    n_subfeatures = 0,
    max_depth = 6,
    min_samples_leaf = 5,
), X, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 45
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.41

# Random forest (10 trees, fixed RNG).
# NOTE(review): `MLJ.predict` is referenced below, but only `MLJBase` is
# imported above — confirm `MLJ` is in scope when this script runs.
mach = machine(ModalRandomForest(;
    n_subfeatures = 0.7,
    ntrees = 10,
    sampling_fraction = 0.7,
    max_depth = -1,
    min_samples_leaf = 1,
    min_samples_split = 2,
    min_purity_increase = 0.0,
    rng = Random.MersenneTwister(1)
), X, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 736
@test_nowarn predict_mode(mach, rows = test_idxs)
@test_nowarn MLJ.predict(mach, rows = test_idxs)
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.53

############################################################################################
# NamedTuple dataset

mach = @time machine(ModalDecisionTree(;), Xnt, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 57
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.43

# IA7 interval relations with `minimum` feature, exploration from the center.
mach = @time machine(ModalDecisionTree(;
    relations = :IA7,
    features = [minimum],
    initconditions = :start_at_center,
), Xnt, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 43
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.58

# Same, but passing pre-selected rows instead of `rows = ...`.
mach = @time machine(ModalDecisionTree(;
    relations = :IA7,
    features = [minimum, maximum],
    # initconditions = :start_at_center,
    featvaltype = Float32,
), selectrows(Xnt, train_idxs), selectrows(y, train_idxs)) |> m->fit!(m)
@test nnodes(fitted_params(mach).rawmodel) == 57
@test sum(predict_mode(mach, selectrows(Xnt, test_idxs)) .== y[test_idxs]) / length(y[test_idxs]) > 0.43

############################################################################################
############################################################################################
############################################################################################

mach = @time machine(ModalDecisionTree(;), Xnt, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 57
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.43

mach = @time machine(ModalDecisionTree(;
    n_subfeatures = 0,
    max_depth = 6,
    min_samples_leaf = 5,
), Xnt, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 45
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.41

# An integer `n_subfeatures` must be rejected for forests here — presumably
# a fraction (or function) is expected; confirm against the model docs.
@test_throws CompositeException mach = machine(ModalRandomForest(;
    n_subfeatures = 3,
    ntrees = 10,
    sampling_fraction = 0.7,
    max_depth = -1,
    min_samples_leaf = 1,
    min_samples_split = 2,
    min_purity_increase = 0.0,
    rng = Random.MersenneTwister(1),
), Xnt, y) |> m->fit!(m, rows = train_idxs)

mach = machine(ModalRandomForest(;
    n_subfeatures = 0.2,
    ntrees = 10,
    sampling_fraction = 0.7,
    max_depth = -1,
    min_samples_leaf = 1,
    min_samples_split = 2,
    min_purity_increase = 0.0,
    rng = Random.MersenneTwister(1),
), Xnt, y) |> m->fit!(m, rows = train_idxs)
@test nnodes(fitted_params(mach).rawmodel) == 768
@test sum(predict_mode(mach, rows = test_idxs) .== y[test_idxs]) / length(y[test_idxs]) > 0.51
# ############################################################################################
# ############################################################################################
# ############################################################################################
# using ImageFiltering
# using StatsBase
# kernel = [1 0 -1;
# 2 0 -2;
# 1 0 -1]
# im = imfilter(rand(10,10), kernel)
# im = imfilter(rand(2,2), kernel)
# recvedge(x) = (imfilter(x, [1;; -1]))
# rechedge(x) = (imfilter(x, [1;; -1]'))
# recvsobel(x) = (imfilter(x, [1 0 -1; 2 0 -2; 1 0 -1]))
# rechsobel(x) = (imfilter(x, [1 0 -1; 2 0 -2; 1 0 -1]'))
# vedge(x) = StatsBase.mean(recvedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(recvedge(x))
# hedge(x) = StatsBase.mean(rechedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(rechedge(x))
# vsobel(x) = StatsBase.mean(recvsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(recvsobel(x))
# hsobel(x) = StatsBase.mean(rechsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.mean(rechsobel(x))
# svedge(x) = StatsBase.sum(recvedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(recvedge(x))
# shedge(x) = StatsBase.sum(rechedge(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(rechedge(x))
# svsobel(x) = StatsBase.sum(recvsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(recvsobel(x))
# shsobel(x) = StatsBase.sum(rechsobel(x)) # prod(size(x)) == 1 ? Inf : StatsBase.sum(rechsobel(x))
# train_idxs, test_idxs = p[1:round(Int, N*.2)], p[round(Int, N*.2)+1:end]
# # train_idxs = train_idxs[1:10]
# mach = @time machine(ModalDecisionTree(;
# relations = :IA7,
# features = [hedge, vedge],
# # initconditions = :start_at_center,
# featvaltype = Float32,
# ), selectrows(Xnt, train_idxs), selectrows(y, train_idxs)) |> m->fit!(m)
# @show nnodes(fitted_params(mach).rawmodel) # @test nnodes(fitted_params(mach).rawmodel) == 71
# @show sum(predict_mode(mach, selectrows(Xnt, test_idxs)) .== y[test_idxs]) / length(y[test_idxs]) # @test sum(predict_mode(mach, selectrows(Xnt, test_idxs)) .== y[test_idxs]) / length(y[test_idxs]) > 0.73
# preds, tree2 = report(mach).sprinkle(selectrows(Xnt, test_idxs), selectrows(y, test_idxs));
# @show MLJ.accuracy(preds, selectrows(y, test_idxs)) # @test MLJ.accuracy(preds, selectrows(y, test_idxs)) > 0.75
# # printmodel.(joinrules(listrules(report(mach).model)); show_metrics = true, threshold_digits = 2);
# printmodel.(joinrules(listrules(ModalDecisionTrees.translate(tree2))); show_metrics = true, threshold_digits = 2);
# readmetrics.(joinrules(listrules(ModalDecisionTrees.translate(tree2))))
# # train_idxs = train_idxs[1:10]
# mach = @time machine(ModalDecisionTree(;
# relations = :IA7,
# features = [shedge, svedge],
# # initconditions = :start_at_center,
# featvaltype = Float32,
# ), selectrows(Xnt, train_idxs), selectrows(y, train_idxs)) |> m->fit!(m)
# @show nnodes(fitted_params(mach).rawmodel) # @test nnodes(fitted_params(mach).rawmodel) == 79
# @show sum(predict_mode(mach, selectrows(Xnt, test_idxs)) .== y[test_idxs]) / length(y[test_idxs]) # @test sum(predict_mode(mach, selectrows(Xnt, test_idxs)) .== y[test_idxs]) / length(y[test_idxs]) > 0.73
# preds, tree2 = report(mach).sprinkle(selectrows(Xnt, test_idxs), selectrows(y, test_idxs));
# @show MLJ.accuracy(preds, selectrows(y, test_idxs)) # @test MLJ.accuracy(preds, selectrows(y, test_idxs)) > 0.75
# # printmodel.(joinrules(listrules(report(mach).model)); show_metrics = true, threshold_digits = 2);
# printmodel.(joinrules(listrules(ModalDecisionTrees.translate(tree2))); show_metrics = true, threshold_digits = 2);
# readmetrics.(joinrules(listrules(ModalDecisionTrees.translate(tree2))))
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3853 |
using ModalDecisionTrees
using MLJ
using Random
X, y = @load_iris
model = ModalDecisionTree(; max_depth = 0)
mach = @time machine(model, X, y) |> fit!
@test height(fitted_params(mach).rawmodel) == 0
@test depth(fitted_params(mach).rawmodel) == 0
model = ModalDecisionTree(; max_depth = 2, )
mach = @time machine(model, X, y) |> fit!
@test depth(fitted_params(mach).rawmodel) == 2
model = ModalDecisionTree(;
min_samples_leaf = 2,
min_samples_split = 4,
min_purity_increase = 0.1,
max_purity_at_leaf = 1.0,
print_progress = true,
rng = 2
)
mach = @time machine(model, X, y) |> fit!
@test depth(fitted_params(mach).tree) == 4
################################################################################
using ModalDecisionTrees
using MLJ
using Random
X, y = @load_iris
model = ModalDecisionTree(;
max_purity_at_leaf = 1.0,
print_progress = true,
display_depth = 1,
rng = Random.MersenneTwister(2)
)
mach = @time machine(model, X, y) |> fit!
report(mach).printmodel()
################################################################################
using ModalDecisionTrees
using MLJ
using Random
X, y = @load_iris
model = ModalDecisionTree(;
max_purity_at_leaf = 1.0,
print_progress = true,
max_modal_depth = 2,
n_subfeatures = round(Int, length(Tables.columns(X)) * (0.5)),
# display_depth = nothing,
display_depth = 2,
rng = Random.MersenneTwister(2)
)
mach = @time machine(model, X, y) |> fit!
@test depth(fitted_params(mach).tree) == 6
@test_nowarn report(mach).printmodel()
@test_nowarn report(mach).printmodel(false, 0)
@test_nowarn report(mach).printmodel(true, 0)
@test_nowarn report(mach).printmodel(false, 2)
@test_nowarn report(mach).printmodel(true, 2)
@test_nowarn report(mach).printmodel(0)
@test_nowarn report(mach).printmodel(1)
@test_nowarn report(mach).printmodel(4)
@test_nowarn report(mach).printmodel(10)
report(mach).printmodel(; hidemodality=false)
report(mach).printmodel(hidemodality=false)
@test_nowarn report(mach).printmodel(show_metrics = true)
@test_nowarn report(mach).printmodel(show_intermediate_finals = true)
@test_nowarn report(mach).printmodel(show_metrics = true, show_intermediate_finals = true)
@test_nowarn report(mach).printmodel(show_metrics = true, show_intermediate_finals = true, max_depth=nothing)
@test_nowarn report(mach).printmodel(show_metrics = (;), show_intermediate_finals = 200, max_depth=nothing)
printmodel.(listrules(report(mach).model); show_metrics=true);
out1 = (io = IOBuffer(); report(mach).printmodel(io, true); String(take!(io)))
out2 = (io = IOBuffer(); report(mach).printmodel(io, false); String(take!(io)))
@test occursin("petal", out1)
@test occursin("petal", out2)
# @test occursin("petal", displaymodel(report(mach).model))
@test_nowarn listrules(report(mach).model)
@test_nowarn listrules(report(mach).model; use_shortforms=true)
@test_nowarn listrules(report(mach).model; use_shortforms=false)
printmodel.(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true))
@test_nowarn listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true)
# @test_throws ErrorException listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true)
@test_nowarn listrules(report(mach).model; use_shortforms=false, use_leftmostlinearform = true)
@test_throws ErrorException listrules(report(mach).model; use_shortforms=false, use_leftmostlinearform = true, force_syntaxtree = true)
@test_nowarn report(mach).printmodel(true, 3; syntaxstring_kwargs = (;hidemodality = true))
# Modal random forest on iris.
model = ModalRandomForest()
# BUGFIX: `w` was used in the fit below without ever being defined in this
# script (UndefVarError); define random positive per-instance weights, as in
# the sibling iris scripts.
w = abs.(randn(length(y)))
mach = @time machine(model, X, y, w) |> fit!

Xnew = (sepal_length = [6.4, 7.2, 7.4],
        sepal_width = [2.8, 3.0, 2.8],
        petal_length = [5.6, 5.8, 6.1],
        petal_width = [2.1, 1.6, 1.9],)

yhat = MLJ.predict(mach, Xnew)
yhat = MLJ.predict_mode(mach, X)

@test MLJ.accuracy(y, yhat) > 0.8
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1324 | using ModalDecisionTrees
using MLJ
################################################################################
X, y = @load_iris
w = abs.(randn(length(y)))
# w = fill(1, length(y))
# w = rand([1,2], length(y))
model = ModalDecisionTree()
mach = @time machine(model, X, y, w) |> fit!
Xnew = (sepal_length = [6.4, 7.2, 7.4],
sepal_width = [2.8, 3.0, 2.8],
petal_length = [5.6, 5.8, 6.1],
petal_width = [2.1, 1.6, 1.9],)
yhat = MLJ.predict(mach, Xnew)
yhat = MLJ.predict_mode(mach, Xnew)
yhat = MLJ.predict_mode(mach, X)
@test MLJ.accuracy(y, yhat) > 0.8
@test_nowarn fitted_params(mach).rawmodel
@test_nowarn report(mach).model
@test_nowarn printmodel(prune(fitted_params(mach).rawmodel, simplify=true, min_samples_leaf = 20), max_depth = 3)
@test_nowarn printmodel(prune(fitted_params(mach).rawmodel, simplify=true, min_samples_leaf = 20))
@test_nowarn printmodel(report(mach).model, header = false)
@test_nowarn printmodel(report(mach).model, header = :brief)
@test_nowarn printmodel(report(mach).model, header = true)
io = IOBuffer()
@test_nowarn printmodel(io, report(mach).model, show_subtree_info = true)
# String(take!(io))
@test_nowarn printmodel.((SoleModels.listrules(report(mach).model,)));
################################################################################
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5197 | # Import packages
# === Japanese Vowels (temporal) tests: fit/predict, printing, NaN rejection,
# === and fitting directly on (multi)logisets.
using Test
using MLJ
using ModalDecisionTrees
using SoleModels
using SoleData
using Random

# A Modal Decision Tree with ≥ 4 samples at leaf
t = ModalDecisionTree(;
    min_samples_split=2,
    min_samples_leaf = 4,
)

# Load an example dataset (a temporal one)
X, y = ModalDecisionTrees.load_japanesevowels()
X, varnames = SoleData.dataframe2cube(X)
# Shuffle with a fixed seed and keep the first 100 instances.
p = randperm(Random.MersenneTwister(2), 100)
X, y = X[:, :, p], y[p]
# Wrap as a NamedTuple of per-variable instance slices.
X = NamedTuple(zip(Symbol.(1:length(eachslice(X; dims=2))), eachslice.(eachslice(X; dims=2); dims=2)))
nvars = length(X)
N = length(y)

mach = machine(t, X, y)

# Split dataset
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]

# Fit
@time MLJ.fit!(mach, rows=train_idxs)

# Perform predictions, compute accuracy
yhat = MLJ.predict(mach, rows=test_idxs)
acc = sum(mode.(yhat) .== y[test_idxs])/length(yhat)
yhat = MLJ.predict_mode(mach, rows=test_idxs)
acc = sum(yhat .== y[test_idxs])/length(yhat)
@test acc >= 0.8

# Variable-name maps: accepted forms vs. forms that must raise BoundsError.
@test_nowarn report(mach).printmodel(syntaxstring_kwargs = (; variable_names_map = [('A':('A'+nvars))], threshold_digits = 2))
@test_throws BoundsError report(mach).printmodel(syntaxstring_kwargs = (; variable_names_map = [["a", "b"]]))
@test_throws BoundsError report(mach).printmodel(syntaxstring_kwargs = (; variable_names_map = ["a", "b"]))
@test_nowarn report(mach).printmodel(syntaxstring_kwargs = (; variable_names_map = 'A':('A'+nvars)))
@test_nowarn report(mach).printmodel(syntaxstring_kwargs = (; variable_names_map = collect('A':('A'+nvars))))
@test_nowarn printmodel(report(mach).model)
@test_nowarn listrules(report(mach).model)
@test_nowarn listrules(report(mach).model; use_shortforms=true)
@test_nowarn listrules(report(mach).model; use_shortforms=false)
@test_nowarn listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true)
@test_nowarn listrules(report(mach).model; use_shortforms=false, use_leftmostlinearform = true)
@test_throws ErrorException listrules(report(mach).model; use_shortforms=false, use_leftmostlinearform = true, force_syntaxtree = true)

# Access raw model
fitted_params(mach).rawmodel;
report(mach).printmodel(3);

@time MLJ.fit!(mach)
@test_nowarn feature_importances(mach)

############################################################################################
############################################################################################
############################################################################################

# Various hyperparameter combinations (post-pruning, subfeature sampling, downsizing).
mach = @time machine(ModalDecisionTree(post_prune = true), X, y) |> MLJ.fit!
mach = @time machine(ModalDecisionTree(post_prune = true, max_modal_depth = 2), X, y) |> MLJ.fit!
mach = @time machine(ModalDecisionTree(min_samples_split=100, post_prune = true, merge_purity_threshold = 0.4), X, y) |> MLJ.fit!
mach = @time machine(ModalDecisionTree(n_subfeatures = 0.2,), X, y) |> MLJ.fit!
mach = @time machine(ModalDecisionTree(n_subfeatures = 2,), X, y) |> MLJ.fit!
mach = @time machine(ModalDecisionTree(n_subfeatures = x->ceil(Int64, div(x, 2)),), X, y) |> MLJ.fit!
mach = @time machine(ModalDecisionTree(downsize = false,), X, y) |> MLJ.fit!

############################################################################################
############################################################################################
############################################################################################

# NaNs in the input must be rejected at fit time.
Xwithnans = deepcopy(X)
for i in 1:4
    rng = MersenneTwister(i)
    c = rand(rng, 1:length(Xwithnans))
    r = rand(rng, 1:length(Xwithnans[c]))
    Xwithnans[c][r][rand(1:length(Xwithnans[c][r]))] = NaN
    @test_throws ErrorException @time machine(ModalDecisionTree(), Xwithnans, y) |> MLJ.fit!
end

############################################################################################
############################################################################################
############################################################################################

X, y = ModalDecisionTrees.load_japanesevowels()
X, varnames = SoleData.dataframe2cube(X)
multilogiset, var_grouping = ModalDecisionTrees.wrapdataset(X, ModalDecisionTree(; min_samples_leaf = 1))

# A Modal Decision Tree
t = ModalDecisionTree(min_samples_split=100, post_prune = true, merge_purity_threshold = true)

N = length(y)
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]

# Fitting directly on a (single-modality) logiset must issue a warning.
mach = @test_logs (:warn,) machine(t, modality(multilogiset, 1), y)
@time MLJ.fit!(mach, rows=train_idxs)
yhat = MLJ.predict_mode(mach, rows=test_idxs)
acc = sum(yhat .== y[test_idxs])/length(yhat)
@test MLJ.kappa(yhat, y[test_idxs]) > 0.5

mach = @test_logs (:warn,) machine(t, multilogiset, y)
# Fit
@time MLJ.fit!(mach, rows=train_idxs)
yhat = MLJ.predict_mode(mach, rows=test_idxs)
acc = sum(yhat .== y[test_idxs])/length(yhat)
# NOTE(review): the result of this comparison is discarded (no `@test`) —
# likely intended as `@test MLJ.kappa(...) > 0.5`.
MLJ.kappa(yhat, y[test_idxs]) > 0.5

@test_nowarn yhat = MLJ.predict_mode(mach, multilogiset)

@test_nowarn prune(fitted_params(mach).rawmodel, simplify=true)
@test_nowarn prune(fitted_params(mach).rawmodel, simplify=true, min_samples_leaf = 20)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6599 | using Test
# === MNIST tests: default and downsized trees on (multi-channel) images. ===
using Logging
using MLJ
using SoleData
using SoleModels
using ModalDecisionTrees
using MLDatasets

# Downsizing window applied to the MNIST images.
DOWNSIZE_WINDOW = (3,3)

# Load the MNIST training set, supporting both MLDatasets v0.7 and v0.5 APIs.
Xcube, y = begin
    if MNIST isa Base.Callable # v0.7
        trainset = MNIST(:train)
        trainset[:]
    else # v0.5
        MNIST.traindata()
    end
end

y = string.(y)
N = length(y)
# Small positional train split / larger test split (not shuffled).
p = 1:100
p_test = 101:1000 # N

# Baseline: default ModalDecisionTree on the tabular (DataFrame) form.
begin
    X = SoleData.cube2dataframe(Xcube)
    X_train, y_train = X[p,:], y[p]
    X_test, y_test = X[p_test,:], y[p_test]

    model = ModalDecisionTree()
    mach = @time machine(model, X_train, y_train) |> fit!
    report(mach).printmodel(1000; threshold_digits = 2);
    yhat_test = MLJ.predict_mode(mach, X_test)
    @test MLJ.accuracy(y_test, yhat_test) > 0.2
end

# Add a singleton channel dimension (named "black") before instances.
_s = collect(size(Xcube))
insert!(_s, length(_s), 1)
Xcube = reshape(Xcube, _s...)

X = SoleData.cube2dataframe(Xcube, ["black"])
X_train, y_train = X[p,:], y[p]
X_test, y_test = X[p_test,:], y[p_test]

# begin
#     model = ModalDecisionTree()
#     mach = @time machine(model, X_train, y_train) |> fit!
#     report(mach).printmodel(1000; threshold_digits = 2);
#     yhat_test = MLJ.predict_mode(mach, X_test)
#     MLJ.accuracy(y_test, yhat_test)
#     @test MLJ.accuracy(y_test, yhat_test) > 0.2
# end
# begin
#     model = ModalDecisionTree(; relations = :IA7,)
#     mach = @time machine(model, X_train, y_train) |> fit!
#     report(mach).printmodel(1000; threshold_digits = 2);
#     yhat_test = MLJ.predict_mode(mach, X_test)
#     MLJ.accuracy(y_test, yhat_test)
#     @test MLJ.accuracy(y_test, yhat_test) > 0.2
# end

# Downsized images, default relations.
begin
    model = ModalDecisionTree(; downsize = DOWNSIZE_WINDOW)
    mach = @time machine(model, X_train, y_train) |> fit!
    report(mach).printmodel(1000; threshold_digits = 2);
    yhat_test = MLJ.predict_mode(mach, X_test)
    MLJ.accuracy(y_test, yhat_test)
    @test MLJ.accuracy(y_test, yhat_test) > 0.12
end

# Downsized images with IA7 relations.
begin
    model = ModalDecisionTree(; relations = :IA7, downsize = DOWNSIZE_WINDOW)
    mach = @time machine(model, X_train, y_train) |> fit!
    report(mach).printmodel(1000; threshold_digits = 2);
    yhat_test = MLJ.predict_mode(mach, X_test)
    MLJ.accuracy(y_test, yhat_test)
    @test MLJ.accuracy(y_test, yhat_test) > 0.26
end
begin
recheight(x) = Float32(size(x, 1))
recwidth(x) = Float32(size(x, 2))
model = ModalDecisionTree(;
relations = :IA7,
features = [minimum, maximum, recheight, recwidth],
featvaltype = Float32,
downsize = DOWNSIZE_WINDOW,
)
mach1 = @time machine(model, X_train, y_train) |> fit!
model = ModalDecisionTree(;
relations = :IA7,
features = [recheight, recwidth, minimum, maximum],
featvaltype = Float32,
downsize = DOWNSIZE_WINDOW,
)
mach2 = @time machine(model, X_train, y_train) |> fit!
report(mach1).printmodel(1000; threshold_digits = 2);
report(mach2).printmodel(1000; threshold_digits = 2);
@test_broken displaymodel(fitted_params(mach1).solemodel) == displaymodel(fitted_params(mach2).solemodel)
yhat_test = MLJ.predict_mode(mach1, X_test)
MLJ.accuracy(y_test, yhat_test)
@test MLJ.accuracy(y_test, yhat_test) > 0.25
end
# As above, but with worlds initialized at the image center (:start_at_center).
begin
recheight(x) = Float32(size(x, 1))
recwidth(x) = Float32(size(x, 2))
model = ModalDecisionTree(;
relations = :IA7,
features = [minimum, maximum, recheight, recwidth],
initconditions = :start_at_center,
featvaltype = Float32,
downsize = DOWNSIZE_WINDOW,
# features = [minimum, maximum, UnivariateFeature{Float64}(recheight), UnivariateFeature{Float64}(recwidth)],
# features = [minimum, maximum, UnivariateFeature{Float32}(1, recheight), UnivariateFeature{Float32}(1, recwidth)],
)
mach = @time machine(model, X_train, y_train) |> fit!
report(mach).printmodel(1000; threshold_digits = 2);
yhat_test = MLJ.predict_mode(mach, X_test)
MLJ.accuracy(y_test, yhat_test)
@test MLJ.accuracy(y_test, yhat_test) > 0.25
end
# Minimal setup: IA3 relations and a single feature (minimum).
begin
model = ModalDecisionTree(;
relations = :IA3,
features = [minimum],
initconditions = :start_at_center,
featvaltype = Float32,
downsize = DOWNSIZE_WINDOW,
# features = [minimum, maximum, UnivariateFeature{Float64}(recheight), UnivariateFeature{Float64}(recwidth)],
# features = [minimum, maximum, UnivariateFeature{Float32}(1, recheight), UnivariateFeature{Float32}(1, recwidth)],
)
mach = @time machine(model, X_train, y_train) |> fit!
report(mach).printmodel(1000; threshold_digits = 2);
yhat_test = MLJ.predict_mode(mach, X_test)
MLJ.accuracy(y_test, yhat_test)
@test MLJ.accuracy(y_test, yhat_test) > 0.25
end
# Downsizing via an explicit moving-average callable vs. the tuple shorthand;
# both fits are checked to emit no error-level logs. Then the fitted tree is
# translated to a SoleModels tree and its rules are extracted and printed.
begin
model = ModalDecisionTree(;
relations = :IA7,
features = [minimum],
initconditions = :start_at_center,
# downsize = (x)->ModalDecisionTrees.MLJInterface.moving_average(x, (10,10)),
downsize = (x)->ModalDecisionTrees.MLJInterface.moving_average(x, DOWNSIZE_WINDOW),
)
mach = @test_logs min_level=Logging.Error @time machine(model, X_train, y_train) |> fit!
# Same downsize expressed as a tuple; this machine replaces the previous one.
model = ModalDecisionTree(;
relations = :IA7,
features = [minimum],
initconditions = :start_at_center,
downsize = DOWNSIZE_WINDOW,
# downsize = (10,10),
# features = [minimum, maximum, UnivariateFeature{Float64}(recheight), UnivariateFeature{Float64}(recwidth)],
# features = [minimum, maximum, UnivariateFeature{Float32}(1, recheight), UnivariateFeature{Float32}(1, recwidth)],
)
mach = @test_logs min_level=Logging.Error @time machine(model, X_train, y_train) |> fit!
report(mach).printmodel(1000; threshold_digits = 2);
yhat_test = MLJ.predict_mode(mach, X_test)
@test MLJ.accuracy(y_test, yhat_test) > 0.25
# `sprinkle` re-evaluates the tree on (X_test, y_test); predictions must match
# predict_mode.
yhat_test2, tree2 = report(mach).sprinkle(X_test, y_test);
@test yhat_test2 == yhat_test
soletree2 = ModalDecisionTrees.translate(tree2)
@test_nowarn printmodel(soletree2; show_metrics = true);
@test_nowarn printmodel.(listrules(soletree2); show_metrics = true, threshold_digits = 2);
@test_nowarn printmodel.(joinrules(listrules(soletree2)); show_metrics = true, threshold_digits = 2);
SoleModels.info.(listrules(soletree2), :supporting_labels);
_leaves = consequent.(listrules(soletree2))
SoleModels.readmetrics.(_leaves)
# Leaves sorted by their metrics (exploratory output, not asserted).
zip(SoleModels.readmetrics.(_leaves),_leaves) |> collect |> sort
# @test MLJ.accuracy(y_test, yhat_test) > 0.4 with DOWNSIZE_WINDOW = (10,10)
@test MLJ.accuracy(y_test, yhat_test) > 0.27
end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 4337 | using MLJ
using ModalDecisionTrees
using MLDatasets # TODO remove?
using DataFrames
using Random
# Regression, spatial problem
# Predict radar Band3 intensity from 3x3 neighborhoods of Band1/Band2.
using RDatasets
channing = RDatasets.dataset("robustbase", "radarImage")
N = maximum(channing[:,:XCoord])
M = maximum(channing[:,:YCoord])
# Dense (X, Y, band) cube; Inf marks pixels with no measurement.
Xcube = fill(Inf, N, M, 3)
for r in eachrow(channing)
Xcube[r[:XCoord], r[:YCoord], 1] = r[:Band1]
Xcube[r[:XCoord], r[:YCoord], 2] = r[:Band2]
Xcube[r[:XCoord], r[:YCoord], 3] = r[:Band3]
end
# samplemap: pixels where all three bands are present.
samplemap = (x->all(!isinf,x)).(eachslice(Xcube; dims=(1,2)))
_s = size(samplemap)
# Zero the border so every kept pixel has a full 3x3 neighborhood.
samplemap[[1,end],:] .= 0
samplemap[:,[1,end]] .= 0
# Smooth the mask along both axes, then pad back to the original size and keep
# only pixels whose whole smoothed neighborhood is valid (== 1.0).
# NOTE(review): `moving_average` is used unqualified here — presumably
# ModalDecisionTrees.MLJInterface.moving_average; confirm it is in scope.
samplemap = cat(moving_average(eachslice(samplemap; dims=1); window_size=2, window_step=1)...; dims=2)'
samplemap = cat(moving_average(eachslice(samplemap; dims=2); window_size=2, window_step=1)...; dims=2)
samplemap = hcat(eachslice(samplemap; dims=2)..., zeros(size(samplemap, 1)))
samplemap = hcat(eachslice(samplemap; dims=1)..., zeros(size(samplemap, 2)))'
samplemap = (samplemap .== 1.0)
@assert _s == size(samplemap)
# One sample per valid pixel: 3x3x2 patch of Bands 1-2 as input, center Band3 as target.
samples = [begin
X = Xcube[(idx[1]-1):(idx[1]+1), (idx[2]-1):(idx[2]+1), [1,2]]
y = Xcube[idx[1], idx[2], 3]
(X, y)
end for idx in findall(isone, samplemap)]
samples = filter(s->all(!isinf, first(s)), samples)
shuffle!(samples)
# Two dataframe columns: one 3x3 matrix per sample for each band.
X = DataFrame([((x)->x[:,:,1]).(first.(samples)), ((x)->x[:,:,2]).(first.(samples))], :auto)
y = last.(samples)
N = length(y)
mach = machine(ModalDecisionTree(min_samples_leaf=4), X, y)
# Split dataset
p = randperm(Random.MersenneTwister(1), N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
# Fit
MLJ.fit!(mach, rows=train_idxs)
yhat = MLJ.predict(mach, rows=test_idxs)
mae = MLJ.mae(mean.(yhat), y[test_idxs])
mae = MLJ.mae(MLJ.predict_mean(mach, rows=test_idxs), y[test_idxs])
mae = MLJ.mae(MLJ.predict_mean(mach, rows=train_idxs), y[train_idxs])
# Second model: RCC5 topological relations; extract and normalize rule antecedents.
t = ModalDecisionTree(relations = :RCC5, min_samples_leaf=2)
mach = machine(t, X, y)
MLJ.fit!(mach, rows=train_idxs)
report(mach).printmodel(1000; threshold_digits = 2);
listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true)
fs = SoleData.antecedent.(listrules(report(mach).model; use_shortforms=true, use_leftmostlinearform = true))
fsnorm = map(f->normalize(modforms(f)[1]; allow_atom_flipping = true), fs)
# TODO: expand to implicationstate
"""
    knowntoimply(t1::SyntaxTree, t2::SyntaxTree)

Syntactic (sound but incomplete) check that `t1` implies `t2`. Handles formulas
of the form `[G]φ` / `⟨G⟩φ` over the global relation, recursing on the modal
argument, and leaf atoms carrying `ScalarCondition`s with the same metacondition,
comparing their thresholds with the condition's own test operator. Anything else
conservatively returns `false`.
"""
function knowntoimply(t1::SyntaxTree, t2::SyntaxTree)
    _diamg = SoleLogics.DiamondRelationalConnective(globalrel)
    _boxg = SoleLogics.BoxRelationalConnective(globalrel)
    @assert arity(_diamg) == 1
    @assert arity(_boxg) == 1
    tok1, tok2 = token(t1), token(t2)
    # Modal cases: [G]φ ⇒ ⟨G⟩ψ, [G]φ ⇒ [G]ψ, ⟨G⟩φ ⇒ ⟨G⟩ψ — all reduce to φ ⇒ ψ.
    # (⟨G⟩φ ⇒ [G]ψ is deliberately not derived.)
    if (tok1 == _boxg && (tok2 == _diamg || tok2 == _boxg)) ||
       (tok1 == _diamg && tok2 == _diamg)
        return knowntoimply(children(t1)[1], children(t2)[1])
    end
    # Atom case: same metacondition, and t1's threshold passes t1's own test
    # operator against t2's threshold.
    if tok1 isa Atom{<:ScalarCondition} && tok2 isa Atom{<:ScalarCondition}
        c1 = SoleLogics.value(tok1)
        c2 = SoleLogics.value(tok2)
        return (SoleData.metacond(c1) == SoleData.metacond(c2) &&
                SoleData.test_operator(c1)(SoleData.threshold(c1), SoleData.threshold(c2)))
    end
    return false
end
"""
    _simplify(φ::SyntaxTree)

Simplify a flat conjunction/disjunction by removing redundant children:
in a conjunction, a child implied by some *other* child is dropped; in a
disjunction, a child that implies some *other* child is dropped. Implication
is checked syntactically via `knowntoimply`. Non-∧/∨ formulas are returned
unchanged.
"""
function _simplify(φ::SyntaxTree)
    if token(φ) in [CONJUNCTION, DISJUNCTION]
        # Capture the connective before wrapping: the loop below should not rely
        # on `token` being defined for LeftmostLinearForm.
        conn = token(φ)
        φ = LeftmostLinearForm(φ)
        chs = children(φ)
        # Iterate backwards so deleteat! does not shift yet-unvisited indices.
        for i in length(chs):-1:1
            ch1 = chs[i]
            for ch2 in chs
                # BUGFIX: skip ch1 itself. `knowntoimply(x, x)` can be true
                # (e.g. scalar conditions with non-strict test operators imply
                # themselves), and without this guard every child could be
                # deleted due to its own reflexive implication.
                ch2 === ch1 && continue
                if (conn == CONJUNCTION && knowntoimply(ch2, ch1)) ||
                   (conn == DISJUNCTION && knowntoimply(ch1, ch2))
                    deleteat!(chs, i)
                    break
                end
            end
        end
        tree(LeftmostLinearForm(SoleLogics.connective(φ), chs))
    else
        φ
    end
end
# Simplify the normalized antecedents and print them, then report per-rule
# metrics and train/test MAE for the fitted RCC5 model.
_simplify.(fsnorm)
syntaxstring.(_simplify.(fsnorm)) .|> println;
printmodel.(listrules(report(mach).model); show_metrics = true, threshold_digits = 2);
mae = MLJ.mae(MLJ.predict_mean(mach, rows=test_idxs), y[test_idxs])
mae = MLJ.mae(MLJ.predict_mean(mach, rows=train_idxs), y[train_idxs])
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 10904 |
using Tables
# using ARFFFiles
using DataFrames
using StatsBase
import MLJModelInterface: fit
using HTTP
using ZipFile
using DataStructures
# LaTeX labels for the 24 NATOPS motion-capture variables, in dataset order:
# (X, Y, Z) coordinates for left/right hand tip, elbow, wrist, and thumb.
# NOTE(review): `load_arff_dataset` below defines a *local* variable of the
# same name, so this global is only seen by code outside that function
# (e.g. as an argument to `show_latex`).
variable_names_latex = [
"\\text{hand tip}_X^L",
"\\text{hand tip}_Y^L",
"\\text{hand tip}_Z^L",
"\\text{hand tip}_X^R",
"\\text{hand tip}_Y^R",
"\\text{hand tip}_Z^R",
"\\text{elbow}_X^L",
"\\text{elbow}_Y^L",
"\\text{elbow}_Z^L",
"\\text{elbow}_X^R",
"\\text{elbow}_Y^R",
"\\text{elbow}_Z^R",
"\\text{wrist}_X^L",
"\\text{wrist}_Y^L",
"\\text{wrist}_Z^L",
"\\text{wrist}_X^R",
"\\text{wrist}_Y^R",
"\\text{wrist}_Z^R",
"\\text{thumb}_X^L",
"\\text{thumb}_Y^L",
"\\text{thumb}_Z^L",
"\\text{thumb}_X^R",
"\\text{thumb}_Y^R",
"\\text{thumb}_Z^R",
]
"""
    load_arff_dataset(dataset_name, [path])

Load the multivariate time-series classification dataset `dataset_name` in ARFF
format, either from a remote zip archive (default path points to
timeseriesclassification.com) or from a local directory containing
`<dataset_name>_TRAIN.arff` and `<dataset_name>_TEST.arff`.

Return `((X_train, y_train), (X_test, y_test))`, where each `X` is a
`DataFrame` with one `Vector{Float64}` time series per variable per row, and
each `y` is a vector of class-name strings.

Note: currently specialized for the "NATOPS" dataset (variable and class names
are hard-coded; an assertion rejects other datasets).
"""
function load_arff_dataset(dataset_name, path = "http://www.timeseriesclassification.com/aeon-toolkit/$(dataset_name).zip")
    (X_train, y_train), (X_test, y_test) = begin
        if(any(startswith.(path, ["https://", "http://"])))
            # Remote archive: download the zip and parse the two ARFF entries.
            r = HTTP.get(path);
            z = ZipFile.Reader(IOBuffer(r.body))
            (
                read(z.files[[f.name == "$(dataset_name)_TRAIN.arff" for f in z.files]][1], String) |> parseARFF,
                read(z.files[[f.name == "$(dataset_name)_TEST.arff" for f in z.files]][1], String) |> parseARFF,
            )
        else
            # Local directory: read the ARFF files from disk.
            # BUGFIX: this branch used to index `z.files`, but the zip reader
            # `z` is only bound in the remote branch, so local loading always
            # failed with an UndefVarError.
            (
                read("$(path)/$(dataset_name)_TRAIN.arff", String) |> parseARFF,
                read("$(path)/$(dataset_name)_TEST.arff", String) |> parseARFF,
            )
        end
    end
    @assert dataset_name == "NATOPS" "This code is only for showcasing. Need to expand code to comprehend more datasets."
    # Short display names, one per (coordinate, body part, side) triple, in the
    # order the variables appear in the ARFF files.
    variable_names = [
        "X[Hand tip l]", "Y[Hand tip l]", "Z[Hand tip l]",
        "X[Hand tip r]", "Y[Hand tip r]", "Z[Hand tip r]",
        "X[Elbow l]",    "Y[Elbow l]",    "Z[Elbow l]",
        "X[Elbow r]",    "Y[Elbow r]",    "Z[Elbow r]",
        "X[Wrist l]",    "Y[Wrist l]",    "Z[Wrist l]",
        "X[Wrist r]",    "Y[Wrist r]",    "Z[Wrist r]",
        "X[Thumb l]",    "Y[Thumb l]",    "Z[Thumb l]",
        "X[Thumb r]",    "Y[Thumb r]",    "Z[Thumb r]",
    ]
    # (A local `variable_names_latex` duplicate was removed: it was never used
    # inside this function; the module-level one remains available.)
    X_train = fix_dataframe(X_train, variable_names)
    X_test = fix_dataframe(X_test, variable_names)
    class_names = [
        "I have command",
        "All clear",
        "Not clear",
        "Spread wings",
        "Fold wings",
        "Lock wings",
    ]
    # ARFF stores the class as a float-like string (e.g. "1.0"); map it to its name.
    fix_class_names(y) = class_names[round(Int, parse(Float64, y))]
    y_train = map(fix_class_names, y_train)
    y_test = map(fix_class_names, y_test)
    @assert nrow(X_train) == length(y_train) "$(nrow(X_train)), $(length(y_train))"
    ((X_train, y_train), (X_test, y_test))
end
# Byte codes for ARFF lexical tokens.
# NOTE(review): `parseARFF` below only uses _ARFF_COMMENT, _ARFF_AT, and
# _ARFF_RELMARK; the others appear unused here — possibly kept for a fuller
# tokenizer. Confirm before removing.
const _ARFF_SPACE = UInt8(' ')
const _ARFF_COMMENT = UInt8('%')
const _ARFF_AT = UInt8('@')
const _ARFF_SEP = UInt8(',')
const _ARFF_NEWLINE = UInt8('\n')
const _ARFF_NOMSTART = UInt8('{')
const _ARFF_NOMEND = UInt8('}')
const _ARFF_ESC = UInt8('\\')
const _ARFF_MISSING = UInt8('?')
const _ARFF_RELMARK = UInt8('\'')
# function readARFF(path::String)
# open(path, "r") do io
# df = DataFrame()
# classes = String[]
# lines = readlines(io) ...
"""
    parseARFF(arffstring::String)

Parse a sktime-style multivariate ARFF string into `(df, classes)`, sorted by
class label: `df` has one `Vector{Float64}` column per variable (named
`V1, V2, ...`) and one row per instance; `classes` holds the raw class string
of each row. Data rows are expected to start with `'`, with variables separated
by the literal two-character sequence `\\n` and the class after the closing `'`.
"""
function parseARFF(arffstring::String)
df = DataFrame()
classes = String[]
lines = split(arffstring, "\n")
for i in 1:length(lines)
line = lines[i]
# If not empty line or comment
if !isempty(line)
if UInt8(line[1]) != _ARFF_COMMENT
sline = split(line, " ")
# println(sline[1][1])
# If the first symbol is @
if UInt8(sline[1][1]) == _ARFF_AT
# If @relation
if sline[1][2:end] == "relation"
# println("Relation: " * sline[2])
end
# if sline[1][2:end] == "variable" && sline[2] == "class"
# classes = sline[3][2:end-1]
# println(classes)
# end
# data, first char is '
elseif UInt8(sline[1][1]) == _ARFF_RELMARK
# Strip the leading quote, then split the remainder into the
# quoted series block and the trailing class label.
sline[1] = sline[1][2:end]
data_and_class = split(sline[1],"\'")
string_data = split(data_and_class[1], "\\n")
class = data_and_class[2][2:end]
# Lazily create one Vector{Float64} column per variable on the
# first data row encountered.
if isempty(names(df))
for i in 1:length(string_data)
insertcols!(df, Symbol("V$(i)") => Array{Float64, 1}[]) # add the variables as 1,2,3,ecc.
end
end
float_data = Dict{Int,Vector{Float64}}()
for i in 1:length(string_data)
float_data[i] = map(x->parse(Float64,x), split(string_data[i], ","))
end
# @show float_data
push!(df, [float_data[i] for i in 1:length(string_data)])
push!(classes, class)
# @show data
# @show class
end
end
end
end
# for i in eachrow(df)
# println(typeof(i))
# break
# end
# Sort rows (and labels) so that instances are grouped by class.
p = sortperm(eachrow(df), by=x->classes[rownumber(x)])
return df[p, :], classes[p]
end
"""
    fix_dataframe(df, variable_names = nothing)

Rebuild `df` as a `DataFrame` whose columns are named after `variable_names`
(defaulting to `"V1", "V2", ...`), keeping the per-row series stored in each
original column. All series in the first column must share the same 1-D size.
"""
function fix_dataframe(df, variable_names = nothing)
    s = unique(size.(df[:,1]))
    @assert length(s) == 1 "$(s)"
    @assert length(s[1]) == 1 "$(s[1])"
    nvars, npoints = length(names(df)), s[1][1]
    old_var_names = names(df)
    # Default column names when none are supplied.
    colnames = isnothing(variable_names) ? ["V$(i_var)" for i_var in 1:nvars] : variable_names
    @assert nvars == length(colnames)
    # Collect each variable's series across all rows, keyed by the new name.
    cols = OrderedDict()
    for (colidx, colname) in enumerate(colnames)
        cols[Symbol(colname)] = [row[colidx] for row in eachrow(df)]
    end
    DataFrame(cols)
    # Y = df[:,end]
    # X, string.(Y)
    # X, Y
end
"""
    show_latex(tree; file_suffix = "", variable_names = nothing, silent = true)

Render `tree` to LaTeX (via the external `print-tree-to-latex.jl` utility),
compile it with `pdflatex` inside the `latex/` directory, and open the
resulting PDF with `evince`. `file_suffix` is appended to the output file name;
`silent` suppresses pdflatex output. Side effects: changes the working
directory (and restores it), writes files, and spawns external processes.
"""
function show_latex(tree; file_suffix = "", variable_names = nothing, silent = true)
include("../results/utils/print-tree-to-latex.jl")
savedir = "latex"
# Map raw prediction labels to colored LaTeX boxes.
additional_dict = Dict{String, String}(
"predictionI have command" => "\\fcolorbox{black}{pastel1}{\\ \\ I have command\\ \\ }",
"predictionAll clear" => "\\fcolorbox{black}{pastel2}{\\ \\ All clear\\ \\ }",
"predictionNot clear" => "\\fcolorbox{black}{pastel3}{\\ \\ Not clear\\ \\ }",
"predictionSpread wings" => "\\fcolorbox{black}{pastel4}{\\ \\ Spread wings\\ \\ }",
"predictionFold wings" => "\\fcolorbox{black}{pastel5}{\\ \\ Fold wings\\ \\ }",
"predictionLock wings" => "\\fcolorbox{black}{pastel6}{\\ \\ Lock wings\\ \\ }",
)
# Rendering options forwarded to save_tree_latex (defined by the include above).
common_kwargs = (
conversion_dict = additional_dict,
# threshold_scale_factor = 3,
threshold_show_decimals = 2,
hide_modality_ids = true,
variable_names_map = variable_names,
# replace_dict = Dict([
# # "\\{1\\}" => "",
# # "{1}" => "",
# "NN" => "N",
# ]),
scale = 1.,
height = ["25em", "25em", "20em", "22em", "22em", "22em"],
decisions_at_nodes = false,
edges_textsize = [:Large, :small, :Large, :small, :small, :small],
tree_names = ["t_minmax_static", "t_minmax_temporal", "t_neuro_static", "t_minmax_neuro_temporal", "t_minmax_neuro_static", "t_neuro_temporal"],
latex_preamble = """
\\definecolor{pastel1}{RGB}{161, 201, 244}
\\definecolor{pastel2}{RGB}{255, 180, 130}
\\definecolor{pastel3}{RGB}{141, 229, 161}
\\definecolor{pastel4}{RGB}{255, 159, 155}
\\definecolor{pastel5}{RGB}{208, 187, 255}
\\definecolor{pastel6}{RGB}{222, 187, 155}
\\definecolor{pastel7}{RGB}{250, 176, 228}
\\definecolor{pastel8}{RGB}{207, 207, 207}
\\definecolor{pastel9}{RGB}{255, 254, 163}
\\definecolor{pastel10}{RGB}{185, 242, 240}
""",
# space_unit = (2.2, 3.9)./.2,
space_unit = (2.2, 3.9)./.75,
# min_n_inst =
)
main_tex_file = "tree$(file_suffix == "" ? "" : "-$(file_suffix)").tex"
save_tree_latex(
[tree],
savedir;
main_tex_file = main_tex_file,
common_kwargs...,
)
# Compile in savedir; restore the working directory afterwards.
cd(savedir)
if !silent
run(`pdflatex $(main_tex_file)`);
else
# run(`bash -c "echo 2"`);
# run(`bash -c "echo 2 2\\\>\\&1 \\\> /dev/null"`);
run(`bash -c "pdflatex $(main_tex_file) 2\\\>\\&1 \\\> /dev/null"`);
end
pdf_name = replace(main_tex_file, ".tex" => ".pdf")
run(`evince $pdf_name`);
cd("..")
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 906 | ### Classification - Heterogeneously typed features (ints, floats, bools, strings)
# Classification on heterogeneously typed features (floats, strings, ints, bools).
@testset "heterogeneous.jl" begin
    m, n = 10^2, 5

    # Half-true/half-false pattern from which labels and boolean features derive.
    # (RNG call order below is significant and preserved.)
    tf = [trues(Int(m/2)) falses(Int(m/2))]
    inds = Random.randperm(m)
    labels = string.(tf[inds])

    # Feature matrix: floats everywhere, then columns 2-4 overwritten with
    # strings, rounded ints, and bools respectively.
    features = Array{Any}(undef, m, n)
    features[:,:] = randn(m, n)
    features[:,2] = string.(tf[Random.randperm(m)])
    features[:,3] = map(t -> round.(Int, t), features[:,3])
    features[:,4] = tf[inds]

    # Single decision tree.
    tree_model = build_tree(labels, features)
    tree_preds = apply_tree(tree_model, features)
    @test MLJ.accuracy(labels, tree_preds) > 0.9

    # Random forest.
    n_subfeatures = 2
    ntrees = 3
    forest_model = build_forest(labels, features, n_subfeatures, ntrees)
    forest_preds = apply_forest(forest_model, features)
    @test MLJ.accuracy(labels, forest_preds) > 0.9

    # AdaBoost stumps (third positional argument: number of iterations).
    n_iterations = 7
    stumps_model, coeffs = build_adaboost_stumps(labels, features, n_iterations)
    stump_preds = apply_adaboost_stumps(stumps_model, coeffs, features)
    @test MLJ.accuracy(labels, stump_preds) > 0.9
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3016 | # Classification Test - Iris Data Set
# https://archive.ics.uci.edu/ml/datasets/iris
# Classification on the Iris dataset: stumps, full trees, pruning, forests,
# AdaBoost, and n-fold cross-validation for each model family.
@testset "iris.jl" begin
    features, labels = load_data("iris")
    labels = String.(labels)
    classes = sort(unique(labels))
    n = length(labels)

    # Train a decision stump (depth = 1).
    model = build_stump(labels, features)
    preds = apply_tree(model, features)
    @test MLJ.accuracy(labels, preds) > 0.6
    @test depth(model) == 1
    probs = apply_tree_proba(model, features, classes)
    @test reshape(sum(probs, dims=2), n) ≈ ones(n)  # rows are probability distributions

    # Train a full-depth classifier (over-fits: perfect train accuracy).
    model = build_tree(labels, features)
    preds = apply_tree(model, features)
    @test MLJ.accuracy(labels, preds) == 1.0
    @test length(model) == 9
    @test depth(model) == 5
    @test preds isa Vector{String}
    print_model(model)
    probs = apply_tree_proba(model, features, classes)
    @test reshape(sum(probs, dims=2), n) ≈ ones(n)

    # Prune tree to 8 leaves.
    pruning_purity = 0.9
    pt = prune(model, pruning_purity)
    @test length(pt) == 8
    preds = apply_tree(pt, features)
    @test 0.99 < MLJ.accuracy(labels, preds) < 1.0

    # Prune tree to 3 leaves.
    pruning_purity = 0.6
    pt = prune(model, pruning_purity)
    @test length(pt) == 3
    preds = apply_tree(pt, features)
    @test 0.95 < MLJ.accuracy(labels, preds) < 1.0
    probs = apply_tree_proba(model, features, classes)
    @test reshape(sum(probs, dims=2), n) ≈ ones(n)

    # Prune tree down to a stump (2 leaves).
    pruning_purity = 0.5
    pt = prune(model, pruning_purity)
    @test length(pt) == 2
    preds = apply_tree(pt, features)
    @test 0.66 < MLJ.accuracy(labels, preds) < 1.0

    # Run n-fold cross validation for pruned tree.
    println("\n##### nfoldCV Classification Tree #####")
    nfolds = 3
    accuracy = nfoldCV_tree(labels, features, nfolds)
    @test mean(accuracy) > 0.8

    # Train a random forest classifier.
    ntrees = 10
    n_subfeatures = 2
    sampling_fraction = 0.5
    model = build_forest(labels, features, n_subfeatures, ntrees, sampling_fraction)
    preds = apply_forest(model, features)
    @test MLJ.accuracy(labels, preds) > 0.95
    @test preds isa Vector{String}
    probs = apply_forest_proba(model, features, classes)
    @test reshape(sum(probs, dims=2), n) ≈ ones(n)

    # Run n-fold cross validation for forests.
    println("\n##### nfoldCV Classification Forest #####")
    n_subfeatures = 2
    ntrees = 10
    # FIX: this was `n_folds = 3` — a dead assignment; the call below uses
    # `nfolds`, which silently reused the value set in the tree section.
    nfolds = 3
    sampling_fraction = 0.5
    accuracy = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees, sampling_fraction)
    @test mean(accuracy) > 0.9

    # Train adaptive-boosted decision stumps.
    n_iterations = 15
    model, coeffs = build_adaboost_stumps(labels, features, n_iterations)
    preds = apply_adaboost_stumps(model, coeffs, features)
    @test MLJ.accuracy(labels, preds) > 0.9
    @test preds isa Vector{String}
    probs = apply_adaboost_stumps_proba(model, coeffs, features, classes)
    @test reshape(sum(probs, dims=2), n) ≈ ones(n)

    # Run n-fold cross validation for boosted stumps.
    println("\n##### nfoldCV Classification Adaboosted Stumps #####")
    n_iterations = 15
    nfolds = 3
    accuracy = nfoldCV_stumps(labels, features, nfolds, n_iterations)
    @test mean(accuracy) > 0.9
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.