licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 3532 | using Dates
@testset "Exp2.jl" begin
    # Build the fixture path with joinpath so the tests also run on
    # non-Windows systems (the original hard-coded a "\\" separator).
    filename = joinpath("data", "test.exp2")
    df = DDR2import.Exp2.read(filename)
    #EXP
    @test df[2,:ADEP] == "EHAM"
    @test df[2,:ADES] == "UUEE"
    @test df[2,:ACTYPE] == "A321"
    @test df[2,:RFL] == 350
    @test df[2,:ZONE_ORIG] == "EHAM"
    @test df[2,:ZONE_DEST] == "UUEE"
    @test df[2,:FLIGHT_ID] == 213793846
    @test Dates.year(df[2,:ETD_DATETIME]) == 2017
    @test Dates.month(df[2,:ETD_DATETIME]) == 12
    @test Dates.day(df[2,:ETD_DATETIME]) == 15
    @test Dates.hour(df[2,:ETD_DATETIME]) == 21
    @test Dates.minute(df[2,:ETD_DATETIME]) == 49
    @test df[2,:ETA_DATETIME] === missing
    @test Dates.day(df[1,:ETA_DATETIME]) == 15
    @test Dates.minute(df[1,:ETA_DATETIME]) == 0
    @test Dates.day(df[3,:ETA_DATETIME]) == 15
    @test Dates.minute(df[3,:ETA_DATETIME]) == 0
    @test df[2,:CALLSIGN] == "AFL2193"
    @test df[2,:COMPANY] == "AFL"
    #UUID
    @test df[2,:UUID] == "AFL2193-EHAM-UUEE-20171215213500"
    @test df[2,:FIPS_CLONED] == "N"
    #FLF
    #TODO test FLIGHT_SAAM_ID
    #TODO test FLIGHT_SAMAD_ID
    @test df[2,:TACT_ID] == 512114
    #TODO test SSR_CODE
    #TODO test REGISTRATION
    @test Dates.year(df[2,:PTD_DATETIME]) == 2017
    @test Dates.minute(df[2,:PTD_DATETIME]) == 35
    @test df[2,:ATFM_DELAY] == 0
    #TODO test REROUTING_STATE
    @test df[2,:MOST_PEN_REG] == "X"
    #TODO test TYPE_OF_FLIGHT
    #TODO test EQUIPMENT
    #TODO test ICAO_EQUIP
    #TODO test COM_EQUIP
    #TODO test NAV_EQUIP
    #TODO test SSR_EQUIP
    #TODO test SURVIVAL_EQUIP
    #TODO test PERSONS_ON_BOARD
    #TODO test TOP_FL
    @test df[2,:MAX_RFL] == 350
    #TODO test FLT_PLN_SOURCE
    #ALLFT
    @test Dates.minute(df[2,:AOBT]) == 40
    @test df[2, :IFPSID] == "AA70883446"
    #TODO test IOBT
    #TODO test ORIGFLIGHTDATAQUALITY
    #TODO test FLIGHTDATAQUALITY
    #TODO test SOURCE
    @test df[2, :EXEMPTREASON] == "NEXE"
    @test df[2, :EXEMPTREASONDIST] == "NEXE"
    #TODO test LATEFILER
    #TODO test LATEUPDATER
    #TODO test NORTHATLANTIC
    @test df[2, :COBT] === missing
    #TODO test COBT
    @test Dates.minute(df[2,:EOBT]) == 35
    #TODO test FLIGHTSTATE
    #TODO test PREV2ACTIVATIONFLIGHTSTATE
    #TODO test SUSPENSIONSTATUS
    #TODO test SAMCTOT
    #TODO test SAMSENT
    #TODO test SIPCTOT
    #TODO test SIPSENT
    #TODO test SLOTFORCED
    #TODO test MOSTPENALIZINGREGID
    #TODO test REGAFFECTEDBYNROFINST
    #TODO test EXCLFROMNROFINST
    #TODO test LASTRECEIVEDATFMMESSAGETITLE
    #TODO test LASTRECEIVEDMESSAGETITLE
    #TODO test LASTSENTATFMMESSAGETITLE
    #TODO test MANUALEXEMPREASON
    #TODO test SENSITIVEFLIGHT
    #TODO test READYFORIMPROVEMENT
    #TODO test READYFORDEP
    @test df[2, :REVISEDTAXITIME] == 840
    #TODO test TIS
    #TODO test TRS
    #TODO test TOBESENTSLOTMESSAGE
    #TODO test TOBESENTPROPMESSAGETITLE
    #TODO test LASTSENTSLOTMESSAGETITLE
    #TODO test LASTSENTPROPMESSAGETITLE
    #TODO test LASTSENTSLOTMESSAGE
    #TODO test LASTSENTPROPMESSAGE
    #TODO test FLIGHTCOUNTOPTION
    #TODO test NORMALFLIGHTTACT_ID
    #TODO test PROPFLIGHTTACT_ID
    @test df[2, :OPERATINGACOPERICAOID] == "AFL"
    #TODO test REROUTINGWHY
    #TODO test REROUTINGLFIGHTSTATE
    #TODO test RVR
    #TODO test FTFMAIRAC
    #TODO test FTFMENVBASELINENUM
    #TODO test RTFMAIRAC
    #TODO test RTFMENVBASELINENUM
    #TODO test CTFMAIRAC
    #TODO test CTFMENVBASELINENUM
    #TODO test LASTRECEIVEDPROGRESSMESSAGE
    #LAST TEST
    @test df[5,:ADEP] == "UUEE"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 296 | @testset "For.jl" begin
filename = "data\\test.for"
df = DDR2import.For.read(filename)
@test df[1,:RANK] == "Base"
@test df[2,:Dep] == "ALANDISLANDS"
@test df[3,:Des] == "ESTONIA"
@test df[4,Symbol("2017")] == 2.536986301
@test df[5,Symbol("2024")] == 0.013151227
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1692 | # using Dates
@testset "Frp.jl" begin
    # joinpath keeps the fixture paths portable; the "≈", "ϕ" and "λ" tokens
    # below were mojibake-garbled in the checked-in file and are restored here.
    # NOTE(review): lat/lon field names assumed to be `ϕ`/`λ` — confirm against
    # the Frp point type definition.
    filename = joinpath("data", "test1.frp")
    dc = DDR2import.Frp.read(filename)
    @test dc["ALBANIA"].freeroutepoints[1].type == "E"
    @test dc["ALBANIA"].freeroutepoints[2].name == "NIKRO"
    # @test dc["ALBANIA"].freeroutepoints[3].point.lat ≈ 41.775277777777774 atol = 0.001
    # @test dc["ALBANIA"].freeroutepoints[4].point.lon ≈ 20.472777777777775 atol = 0.001
    @test dc["ALBANIA"].freeroutepoints[3].point.ϕ ≈ 41.775277777777774 atol = 0.001
    @test dc["ALBANIA"].freeroutepoints[4].point.λ ≈ 20.472777777777775 atol = 0.001
    @test dc["ALBANIA"].freerouteairports[1].type == "A"
    @test dc["ALBANIA"].freerouteairports[2].name == "DIRES"
    @test dc["ALBANIA"].freerouteairports[3].airports[1] == "LATI"
    @test isempty(dc["BIRD_DOMESTIC"].freerouteairports[3].airports) == true
    @test dc["EI_FRA"].freerouteairports[18].airports[2] == "EINN"
    #LAST
    @test dc["FRAC"].freerouteairports[1].type == "A"

    filename2 = joinpath("data", "test2.frp")
    dc2 = DDR2import.Frp.read(filename2)
    @test dc2["NAT4"].freeroutepoints[1].name == "*1847"
    # @test dc2["NAT4"].freeroutepoints[1].point.lat == 18.0
    # @test dc2["NAT4"].freeroutepoints[1].point.lon == -47.5
    @test dc2["NAT4"].freeroutepoints[1].point.ϕ == 18.0
    @test dc2["NAT4"].freeroutepoints[1].point.λ == -47.5
    # @test dc2["NEFRA"].freeroutepoints[1129].point.lat ≈ 58.1456667 atol = 0.00001
    # @test dc2["SEEN_FRA_NORTH"].freeroutepoints[184].point.lon ≈ 18.35583333 atol = 0.00001
    @test dc2["NEFRA"].freeroutepoints[1129].point.ϕ ≈ 58.1456667 atol = 0.00001
    @test dc2["SEEN_FRA_NORTH"].freeroutepoints[184].point.λ ≈ 18.35583333 atol = 0.00001
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 276 | # using Dates
@testset "Gar.jl" begin
    # joinpath keeps the fixture path portable; "≈" restored (was mojibake).
    filename = joinpath("data", "test.gar")
    dc = DDR2import.Gar.read(filename)
    @test dc["000EG"][5,1] == 57.0
    @test dc["000EG"][6,2] ≈ -13.8213888889 atol = 0.00001
    #LAST
    @test dc["000EU"][5,1] ≈ 39.6677777778 atol = 0.00001
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 449 | # using Dates
@testset "Gsl.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.gsl")
    dc = DDR2import.Gsl.read(filename)
    @test dc["BIRD"].ID == "BIRD"
    @test dc["BIRD"].Name == "_"
    @test dc["BIRD"].Category == "_"
    @test dc["BIRD"].Type == "FIR"
    @test dc["BIRD"].Airblocks[2].Name == "002BI"
    @test dc["BIRD"].Airblocks[2].LowerFL == 0
    @test dc["BIRD"].Airblocks[2].UpperFL == 999
    #LAST
    @test dc["BIRDBARDAR"].Type == "ES"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 296 | using Dates
@testset "Mot.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.mot")
    dc = DDR2import.Mot.read(filename)
    @test Dates.hour(dc["NAT1"].onlytimes[1].begintime) == 0
    @test Dates.second(dc["NAT1"].onlytimes[1].endtime) == 59
    #LAST
    @test dc["UKOV_FRA"].timetypes[1].type == "EX"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 444 | @testset "Narp.jl" begin
filename = "data\\test.narp"
df = DDR2import.Narp.read(filename)
@test df[1, :AIRPORT_ID] == "AGGA"
@test df[2, :AIRPORT_NAME] == "BALALAE"
@test df[3, :LAT_DEG] == -9.4316666667
@test df[3, :LON_DEG] == 160.0533333333
@test df[9, :TIS] == 0
@test df[9, :TRS] == 0
@test df[9, :TAXITIME] == 0
@test df[9, :ALTITUDE_FL] == 82
#LAST
@test df[13, :AIRPORT_ID] == "AGGV"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 269 | @testset "Nnpt.jl" begin
filename = "data\\test.nnpt"
df = DDR2import.Nnpt.read(filename)
@test df[1, :NAV_ID] == "%%BRU"
@test df[2, :LAT_DEG] == 48.6246363000
@test df[3, :LON_DEG] == 9.2302974333
#LAST
@test df[6, :NAV_ID] == "%%KIK"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 476 | @testset "Ntfv.jl" begin
filename = "data\\test.ntfv"
dc = DDR2import.Ntfv.read(filename)
@test dc["ABSON"].name == ""
@test dc["ALASO"].category == "_"
@test dc["AERODS1"].reflocname == "EDGGADS"
@test dc["AERODS2"].refloctype == "AS"
@test dc["AEROMN"].reflocrole == "G"
#LAST
@test dc["ALGERIA"].airblocks[1].name == "ELI>GC"
@test dc["ALGERIA"].airblocks[2].upperFL == "IN"
@test dc["ALGERIA"].airblocks[2].name == "GC>ELI"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 395 | using Dates
# Renamed from "Country.jl" (apparent copy-paste): this set exercises the
# Operator reader. joinpath keeps the fixture path portable.
@testset "Operator.jl" begin
    filename = joinpath("data", "test.operator")
    dc = DDR2import.Operator.read(filename)
    @test dc["AAA"].name == "ANSETT AUSTRALIA HOLDINGS LTD"
    @test dc["AAB"].callsign == "ABG"
    @test dc["AAC"].countryid == "GB"
    @test dc["AAD"].country == "CANADA"
    @test Dates.day(dc["AAE"].startdate) == 1
    @test Dates.month(dc["AAG"].enddate) == 12
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 310 | @testset "Routes.jl" begin
filename = "data\\test.routes"
dc = DDR2import.Routes.read(filename)
@test dc["ABESI8TLIME"].type == "DP"
@test dc["ABDIL1ALFMD"].route[2].wp == "TUPOX"
@test dc["ABDIL1ALFTZ"].route[3].location_type == "SP"
@test dc["ABIRO2DGMTT"].route[2].wp == "ABIRO"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 330 | import Dates
@testset "Runway.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.runway")
    df = DDR2import.Runway.read(filename)
    @test df[1, :AIRPORT] == "BIAR"
    @test df[2, :RWY] == "19"
    @test Dates.year(df[3, :DATEACTIVE]) == 2018
    @test Dates.minute(df[4, :TIMEOPEN]) == 0
    @test Dates.hour(df[5, :TIMECLOSED]) == 23
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 1126 | using Dates
@testset "SO6.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.so6")
    df = DDR2import.SO6.read(filename)
    @test df[2,:SEGMENT_ID] == "*AM80_!AAEW"
    @test df[2,:ADEP] == "EHAM"
    @test df[2,:ADES] == "UUEE"
    @test df[2,:ACTYPE] == "A321"
    @test df[2,:FLBEGINSEGMENT] == 10
    @test df[2,:FLENDSEGMENT] == 25
    @test df[2,:STATUS] == 0
    @test df[2,:CALLSIGN] == "AFL2193"
    # lat/lon columns are in degrees; the raw file stores minutes
    @test df[2,:LATBEGINSEGMENT_DEG] == 3137.483333 / 60.0
    @test df[2,:LONBEGINSEGMENT_DEG] == 284.183333 / 60.0
    @test df[2,:LATENDSEGMENT_DEG] == 3135.833333 / 60.0
    @test df[2,:LONENDSEGMENT_DEG] == 284.033333 / 60.0
    @test df[2,:FLIGHT_ID] == 213765625
    @test df[2,:SEQUENCE] == 2
    # segment length column is in meters; the raw file stores nautical miles
    @test df[2,:SEGMENT_LENGTH_M] == 1.652550 * 1852.0
    @test df[2,:SEGMENT_PARITY] == 0
    @test Dates.year(df[2,:DATETIMEBEGINSEGMENT]) == 2017
    @test Dates.hour(df[2,:DATETIMEBEGINSEGMENT]) == 21
    @test Dates.second(df[2,:DATETIMEBEGINSEGMENT]) == 26
    @test Dates.day(df[2,:DATETIMEENDSEGMENT]) == 14
    @test Dates.minute(df[2,:DATETIMEENDSEGMENT]) == 50
    #LAST
    @test df[3,:SEGMENT_ID] == "!AAEW_!AAEX"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 216 | # using Dates
@testset "Sid.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.sid")
    dc = DDR2import.Sid.read(filename)
    @test dc["EEEI"][1] == "BALTI"
    @test dc["EEEI"][end] == "TLL"
    @test dc["BIBL"][1] == "RH"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 322 | @testset "Sls.jl" begin
filename = "data\\test.sls"
df = DDR2import.Sls.read(filename)
@test df[2,:SECTORNAME] == "ARMFRA"
@test df[3,:VOLUMENAME] == "BIRD_DOMESTIC"
@test df[36,:VOLUMEBOTTOMLEVEL] == 275
@test df[37,:VOLUMETOPLEVEL] == 660
#LAST
@test df[39,:SECTORNAME] == "UKOV_FRA"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 375 | # using Dates
@testset "Spc.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.spc")
    dc = DDR2import.Spc.read(filename)
    @test dc["AFI"].name == "AFRICA"
    @test dc["AFI"].type == "AREA"
    @test dc["AFI"].sectors[1].name == "DA"
    @test dc["AFI"].sectors[2].type == "NAS"
    #LAST
    @test dc["BENELUX"].name == "EB/EH"
    @test dc["BENELUX"].sectors[end].name == "EH"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 235 | # using Dates
@testset "Star.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.star")
    dc = DDR2import.Star.read(filename)
    @test dc["EEKA"][1] == "NEBSI"
    @test dc["EEKA"][end] == "TEVNA"
    #LAST
    @test dc["NAT"][end] == "ZIBUT"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 828 | import Dates
@testset "T5.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.t5")
    df = DDR2import.T5.read(filename)
    @test df[2,:FLIGHT_ID] == 213765625
    @test df[2,:SECTOR_NAME] == "EHAAFIR"
    @test Dates.year(df[2,:ENTRY_DATETIME]) == 2017
    @test Dates.day(df[2,:ENTRY_DATETIME]) == 14
    @test Dates.minute(df[2,:ENTRY_DATETIME]) == 49
    @test Dates.month(df[2,:EXIT_DATETIME]) == 12
    @test Dates.hour(df[2,:EXIT_DATETIME]) == 22
    @test Dates.second(df[2,:EXIT_DATETIME]) == 0
    @test df[2,:ENTRY_FL] == 0.0
    @test df[2,:EXIT_FL] == 255.0
    @test df[2,:ENTRY_SEGMENT_NAME] == "EHAM_*AM80"
    @test df[2,:EXIT_SEGMENT_NAME] == "!AAEb_SONEB"
    # distance column is in meters; the raw file stores nautical miles
    @test df[2,:TOT_DISTANCE_IN_SEGMENT_M] == 84.060844 * 1852.0
    @test df[2,:TOT_TIME_IN_SEGMENT_S] == 840.0
    #LAST
    @test df[4,:SECTOR_NAME] == "EH_DN"
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | code | 412 | using Dates
@testset "Ur.jl" begin
    # joinpath keeps the fixture path portable across operating systems.
    filename = joinpath("data", "test.ur")
    dc = DDR2import.Ur.read(filename)
    @test Dates.day(dc["EB"].start_date) == 1
    @test Dates.month(dc["ED"].end_date) == 2
    @test dc["EE"].unitrate == 31.51
    @test dc["EG"].valutaconversion == 0.848504
    @test dc["EG"].valuta == "GBP"
    @test dc["EH"].country == "Netherlands"
    #LAST
    @test dc["UG"].unitrate == 28.30
end
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.1.1 | 829430e89d511d1ffcc93e16866f26e2426d4089 | docs | 3483 | # DDR2import
[](https://travis-ci.com/rjdverbeek-tud/DDR2import.jl)
[](https://codecov.io/gh/rjdverbeek-tud/DDR2import.jl)
[](https://coveralls.io/github/rjdverbeek-tud/DDR2import.jl?branch=master)
The EUROCONTROL Demand Data Repository 'DDR2' provides air traffic management
(ATM) actors with the most accurate picture of past and future pan-European air
traffic demand (from several years ahead until the day before operation), as
well as environment data, analysis reports and tools that can read and process
the data.
All this information is managed by the DDR service, a EUROCONTROL cross-unit
activity, and can be accessed from the Demand Data Repository 2 'DDR2' web
portal. Access to the DDR2 web portal is restricted. Access conditions apply.
DDR future traffic can be forecast thanks to the knowledge of past traffic and
several thousands of flight intentions provided by airlines and airports that
are collected, stored, analysed and treated on a daily basis.
DDR traffic forecast supports strategic, seasonal and pre-tactical planning,
and also special events or major ATM evolution projects.
Finally, DDR provides a refined analysis of past demand to facilitate
post-operations analysis and to identify best practices for future operations.
Functionality
DDR2 gives access to:
Past traffic data - from August 2012 till now, traffic demand, last filed
flight plan traffic trajectories as well as actual trajectories are provided
for past analysis;
Past and Pre-OPS (one AIRAC in advance) environment data - they can be
downloaded and are used internally for processing future traffic trajectories.
They contain all information necessary to analyse and process sector loads,
capacity bottlenecks, re-routing options, etc;
Strategic traffic forecast - this covers the planning phase, from 18 months to
7 days before operations. It is used for medium- to-short-term capacity
planning and seasonal planning. Users can themselves generate, with several 4D
trajectory processing options, and download this type of forecast directly via
the DDR2 web portal;
Pre-tactical traffic forecast - it focusses on the planning phase, from 6 days
to 1 day before operations. Network pre-tactical planning is supported by the
NM PREDICT system and can be accessed via the DDR2 portal;
NEST and SAAM tools - they can be downloaded from DDR2 Web portal and are
compatible with DDR data. These tools analyse and process a great deal of information for the purpose of facilitating airspace design and capacity planning in Europe.
Users
The DDR addresses the needs of a wide range of users such as:
air navigation service providers (ANSPs), who use it to prepare and optimise
their capacity plans;
airlines, who rely on it to detect flight efficiency improvement opportunities,
by visualising and comparing flight plan trajectories for any period of time in
the past;
airspace management actors, for airspace management and coordination of the
available airspace;
airports, with the aim of integrating their local plans with the Network
Operations Plan;
the NM at central/FAB/local level.
See EUROCONTROL NEST Manual Section 9.7 for fileformat descriptions
https://www.eurocontrol.int/ddr
| DDR2import | https://github.com/rjdverbeek-tud/DDR2import.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 269 | using Documenter, SparsityOperators
# Build the documentation for SparsityOperators.jl with Documenter.
makedocs(
    modules = [SparsityOperators],
    # NOTE(review): `format = :html` is the legacy Documenter flag; newer
    # Documenter versions require `format = Documenter.HTML()` — confirm the
    # Documenter version used by the docs build still accepts the Symbol form.
    format = :html,
    checkdocs = :exports,
    sitename = "SparsityOperators.jl",
    pages = Any["index.md"]
)

# Push the generated documentation to the repository's gh-pages branch.
deploydocs(
    repo = "github.com/tknopp/SparsityOperators.jl.git",
)
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 1949 | export DCTOp
# Linear operator applying a discrete cosine transform (DCT-II or DCT-IV).
# The field layout mirrors LinearOperators.LinearOperator (5-args interface),
# so DCTOp can be used wherever an AbstractLinearOperator is expected.
mutable struct DCTOp{T} <: AbstractLinearOperator{T}
    nrow :: Int            # number of rows (= prod(shape))
    ncol :: Int            # number of columns (= prod(shape))
    symmetric :: Bool
    hermitian :: Bool
    prod! :: Function      # forward transform
    tprod! :: Nothing      # no separate transpose product stored
    ctprod! :: Function    # adjoint/inverse transform
    nprod :: Int
    ntprod :: Int
    nctprod :: Int
    args5 :: Bool
    use_prod5! :: Bool
    allocated5 :: Bool
    Mv5 :: Vector{T}
    Mtu5 :: Vector{T}
    plan                   # FFTW plan; type intentionally left abstract
    dcttype::Int           # DCT variant: 2 or 4
end

LinearOperators.storage_type(op::DCTOp) = typeof(op.Mv5)
"""
    DCTOp(T::Type, shape::Tuple, dcttype=2)

returns a `DCTOp <: AbstractLinearOperator` which performs a DCT on a given input array.

# Arguments:

* `T::Type`      - type of the array to transform
* `shape::Tuple` - size of the array to transform
* `dcttype`      - type of DCT (currently `2` and `4` are supported)
"""
function DCTOp(T::Type, shape::Tuple, dcttype=2)
    tmp=Array{Complex{real(T)}}(undef, shape)
    if dcttype == 2
        # DCT-II: forward dct! plan; the adjoint slot uses the inverse (idct!) plan.
        plan = plan_dct!(tmp)
        iplan = plan_idct!(tmp)
        prod! = (res, x) -> dct_multiply2(res, plan, x, tmp)
        tprod! = (res, x) -> dct_multiply2(res, iplan, x, tmp)
    elseif dcttype == 4
        # DCT-IV is self-inverse up to scaling; `factor` normalizes the
        # unnormalized FFTW REDFT11 transform in both directions.
        factor = T(sqrt(1.0/(prod(shape)* 2^length(shape)) ))
        plan = FFTW.plan_r2r!(tmp,FFTW.REDFT11)
        prod! = (res, x) -> dct_multiply4(res, plan, x, tmp, factor)
        tprod! = (res, x) -> dct_multiply4(res, plan, x, tmp, factor)
    else
        error("DCT type $(dcttype) not supported")
    end

    # Note: the local `tprod!` closure is passed into the ctprod! slot; the
    # struct's tprod! field itself stays `nothing`.
    return DCTOp{T}(prod(shape), prod(shape), false, false,
                    prod!, nothing, tprod!,
                    0, 0, 0, true, false, true, T[], T[],
                    plan, dcttype)
end
# Run the in-place transform `plan` on a copy of `x` held in the preallocated
# work array `tmp`, then write the result into `res`.
function dct_multiply2(res::Vector{T}, plan::P, x::Vector{T}, tmp::Array{T,D}) where {T,P,D}
    copyto!(vec(tmp), x)
    plan * tmp
    copyto!(res, vec(tmp))
end

# As above, but scale every output sample by `factor` (DCT-IV normalization).
function dct_multiply4(res::Vector{T}, plan::P, x::Vector{T}, tmp::Array{T,D}, factor::T) where {T,P,D}
    copyto!(vec(tmp), x)
    plan * tmp
    map!(v -> factor * v, res, vec(tmp))
end
# Independent DCTOp (new plans and work buffers) with the same size and type.
# NOTE(review): assumes `size(S.plan)` returns the transform shape for both
# dct! and r2r! plans — TODO confirm.
function Base.copy(S::DCTOp)
    return DCTOp(eltype(S), size(S.plan), S.dcttype)
end | SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 1810 | export DSTOp
# Linear operator applying a discrete sine transform. Field layout mirrors
# LinearOperators.LinearOperator (5-args interface).
mutable struct DSTOp{T} <: AbstractLinearOperator{T}
    nrow :: Int
    ncol :: Int
    symmetric :: Bool
    hermitian :: Bool
    prod! :: Function      # weighted forward DST (FFTW RODFT10)
    tprod! :: Nothing
    ctprod! :: Function    # inverse via the RODFT01 plan
    nprod :: Int
    ntprod :: Int
    nctprod :: Int
    args5 :: Bool
    use_prod5! :: Bool
    allocated5 :: Bool
    Mv5 :: Vector{T}
    Mtu5 :: Vector{T}
    plan                   # forward r2r plan
    iplan                  # inverse r2r plan
end

LinearOperators.storage_type(op::DSTOp) = typeof(op.Mv5)

"""
    DSTOp(T::Type, shape::Tuple)

returns a `LinearOperator` which performs a DST on a given input array.

# Arguments:

* `T::Type` - type of the array to transform
* `shape::Tuple` - size of the array to transform
"""
function DSTOp(T::Type, shape::Tuple)
    tmp=Array{Complex{real(T)}}(undef, shape)
    plan = FFTW.plan_r2r!(tmp,FFTW.RODFT10)
    iplan = FFTW.plan_r2r!(tmp,FFTW.RODFT01)

    # per-element weights making the unnormalized r2r transforms orthonormal
    w = weights(shape, T)

    return DSTOp{T}(prod(shape), prod(shape), true, false
                , (res,x) -> dst_multiply!(res,plan,x,tmp,w)
                , nothing
                , (res,x) -> dst_bmultiply!(res,iplan,x,tmp,w)
                , 0, 0, 0, true, false, true, T[], T[]
                , plan
                , iplan)
end
# Normalization weights for the RODFT10-based DST of size `s` (up to 3d):
# a constant 1/sqrt(8*prod(s)) with the last sample along each present
# dimension additionally divided by sqrt(2). Returned as a flat vector.
function weights(s, T::Type)
    base = one(T) / T(sqrt(8 * prod(s)))
    w = fill(base, s)
    r2 = T(sqrt(2))
    w[s[1], :, :] ./= r2
    if length(s) > 1
        w[:, s[2], :] ./= r2
        if length(s) > 2
            w[:, :, s[3]] ./= r2
        end
    end
    return reshape(w, prod(s))
end
# Forward DST kernel: transform a copy of `x` in `tmp` with the in-place
# `plan`, then apply the orthonormalization `weights` into `res`.
function dst_multiply!(res::Vector{T}, plan::P, x::Vector{T}, tmp::Array{T,D}, weights::Vector{T}) where {T,P,D}
    copyto!(vec(tmp), x)
    plan * tmp
    res .= weights .* vec(tmp)
end

# Backward DST kernel: undo the weighting, run the inverse plan, and rescale
# by the plan's overall factor 8*length(tmp).
function dst_bmultiply!(res::Vector{T}, plan::P, x::Vector{T}, tmp::Array{T,D}, weights::Vector{T}) where {T,P,D}
    vec(tmp) .= x ./ weights
    plan * tmp
    scale = 8 * length(tmp)
    res .= vec(tmp) ./ scale
end
# Independent DSTOp with freshly created plans for the same transform size.
# NOTE(review): assumes `size(S.plan)` yields the transform shape — TODO confirm.
function Base.copy(S::DSTOp)
    return DSTOp(eltype(S), size(S.plan))
end | SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 2808 | export FFTOp
import Base.copy
# Linear operator applying a (optionally shifted/unitary) FFT. Field layout
# mirrors LinearOperators.LinearOperator (5-args interface).
mutable struct FFTOp{T} <: AbstractLinearOperator{T}
    nrow :: Int
    ncol :: Int
    symmetric :: Bool
    hermitian :: Bool
    prod! :: Function      # forward FFT
    tprod! :: Nothing
    ctprod! :: Function    # adjoint: unnormalized inverse FFT (bfft)
    nprod :: Int
    ntprod :: Int
    nctprod :: Int
    args5 :: Bool
    use_prod5! :: Bool
    allocated5 :: Bool
    Mv5 :: Vector{T}
    Mtu5 :: Vector{T}
    plan                   # in-place forward plan
    iplan                  # in-place backward (bfft!) plan
    shift::Bool            # whether fftshift/ifftshift wrap the transform
    unitary::Bool          # whether outputs are scaled to make the FFT unitary
end

LinearOperators.storage_type(op::FFTOp) = typeof(op.Mv5)

"""
    FFTOp(T::Type, shape::Tuple, shift=true, unitary=true)

returns an operator which performs an FFT on Arrays of type T

# Arguments:

* `T::Type`        - type of the array to transform
* `shape::Tuple`   - size of the array to transform
* (`shift=true`)   - if true, fftshifts are performed
* (`unitary=true`) - if true, FFT is normalized such that it is unitary
"""
function FFTOp(T::Type, shape::NTuple{D,Int64}, shift::Bool=true; unitary::Bool=true, cuda::Bool=false) where D
    #tmpVec = cuda ? CuArray{T}(undef,shape) : Array{Complex{real(T)}}(undef, shape)
    tmpVec = Array{Complex{real(T)}}(undef, shape)
    plan = plan_fft!(tmpVec; flags=FFTW.MEASURE)
    iplan = plan_bfft!(tmpVec; flags=FFTW.MEASURE)

    if unitary
        # bfft is unnormalized, so both directions carry the 1/sqrt(N) factor
        facF = T(1.0/sqrt(prod(shape)))
        facB = T(1.0/sqrt(prod(shape)))
    else
        facF = T(1.0)
        facB = T(1.0)
    end

    # `let` pins the captured locals so the closures do not box reassignable vars
    let shape_=shape, plan_=plan, iplan_=iplan, tmpVec_=tmpVec, facF_=facF, facB_=facB

        if shift
            return FFTOp{T}(prod(shape), prod(shape), false, false
                      , (res, x) -> fft_multiply_shift!(res, plan_, x, shape_, facF_, tmpVec_)
                      , nothing
                      , (res, x) -> fft_multiply_shift!(res, iplan_, x, shape_, facB_, tmpVec_)
                      , 0, 0, 0, true, false, true, T[], T[]
                      , plan
                      , iplan
                      , shift
                      , unitary)
        else
            return FFTOp{T}(prod(shape), prod(shape), false, false
                      , (res, x) -> fft_multiply!(res, plan_, x, facF_, tmpVec_)
                      , nothing
                      , (res, x) -> fft_multiply!(res, iplan_, x, facB_, tmpVec_)
                      , 0, 0, 0, true, false, true, T[], T[]
                      , plan
                      , iplan
                      , shift
                      , unitary)
        end
    end
end

# copy `x` into the work array, run the in-place plan, scale into `res`
function fft_multiply!(res::AbstractVector{T}, plan::P, x::AbstractVector{Tr}, factor::T, tmpVec::Array{T,D}) where {T, Tr, P<:AbstractFFTs.Plan, D}
    tmpVec[:] .= x
    plan * tmpVec
    res .= factor .* vec(tmpVec)
end

# as above, but with ifftshift before and fftshift after the transform
function fft_multiply_shift!(res::AbstractVector{T}, plan::P, x::AbstractVector{Tr}, shape::NTuple{D}, factor::T, tmpVec::Array{T,D}) where {T, Tr, P<:AbstractFFTs.Plan, D}
    ifftshift!(tmpVec, reshape(x,shape))
    plan * tmpVec
    fftshift!(reshape(res,shape), tmpVec)
    res .*= factor
end

# Independent FFTOp with freshly planned transforms of the same size/options.
function Base.copy(S::FFTOp)
    return FFTOp(eltype(S), size(S.plan), S.shift, unitary=S.unitary)
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 3115 | export GradientOp
"""
    GradientOp(T::Type, shape::NTuple{1,Int64})

1d gradient operator for an array of size `shape`
"""
GradientOp(T::Type, shape::NTuple{1,Int64}) = GradientOp(T,shape,1)

"""
    GradientOp(T::Type, shape::NTuple{2,Int64})

2d gradient operator for an array of size `shape`; stacks both directional
gradients vertically
"""
function GradientOp(T::Type, shape::NTuple{2,Int64})
    return vcat( GradientOp(T,shape,1), GradientOp(T,shape,2) )
end

"""
    GradientOp(T::Type, shape::NTuple{3,Int64})

3d gradient operator for an array of size `shape`; stacks all three
directional gradients vertically
"""
function GradientOp(T::Type, shape::NTuple{3,Int64})
    return vcat( GradientOp(T,shape,1), GradientOp(T,shape,2), GradientOp(T,shape,3) )
end

"""
    GradientOp(T::Type, shape::NTuple{N,Int64}, dim::Int64) where N

directional gradient operator along the dimension `dim`
for an array of size `shape`
"""
function GradientOp(T::Type, shape::NTuple{N,Int64}, dim::Int64) where N
    # forward differences yield shape[dim]-1 samples along `dim`
    nrow = div( (shape[dim]-1)*prod(shape), shape[dim] )
    ncol = prod(shape)
    return LinearOperator{T}(nrow, ncol, false, false,
                             (res,x) -> (grad!(res,x,shape,dim) ),
                             (res,x) -> (grad_t!(res,x,shape,dim) ),
                             nothing )
end
# directional gradients (in-place forward differences).
# `res` must hold div((shape[dim]-1)*prod(shape), shape[dim]) elements.
# `@views`/reshape are used so no temporary arrays are allocated per call.
function grad!(res::T, img::U, shape::NTuple{1,Int64}, dim::Int64) where {T<:AbstractVector,U<:AbstractVector}
    # 1d case: `dim` is ignored
    @views res .= img[1:end-1] .- img[2:end]
    return res
end

function grad!(res::T, img::U, shape::NTuple{2,Int64}, dim::Int64) where {T<:AbstractVector,U<:AbstractVector}
    img = reshape(img, shape)
    if dim == 1
        res_ = reshape(res, shape[1]-1, shape[2])
        @views res_ .= img[1:end-1,:] .- img[2:end,:]
    else
        res_ = reshape(res, shape[1], shape[2]-1)
        @views res_ .= img[:,1:end-1] .- img[:,2:end]
    end
    return res
end

function grad!(res::T, img::U, shape::NTuple{3,Int64}, dim::Int64) where {T<:AbstractVector,U<:AbstractVector}
    img = reshape(img, shape)
    if dim == 1
        res_ = reshape(res, shape[1]-1, shape[2], shape[3])
        @views res_ .= img[1:end-1,:,:] .- img[2:end,:,:]
    elseif dim == 2
        res_ = reshape(res, shape[1], shape[2]-1, shape[3])
        @views res_ .= img[:,1:end-1,:] .- img[:,2:end,:]
    else
        res_ = reshape(res, shape[1], shape[2], shape[3]-1)
        @views res_ .= img[:,:,1:end-1] .- img[:,:,2:end]
    end
    return res
end
# adjoint of directional gradients: each difference value is scattered back
# with +1 on its first sample and -1 on its second sample
function grad_t!(res::T, g::U, shape::NTuple{1,Int64}, dim::Int64) where {T<:AbstractVector,U<:AbstractVector}
    n = shape[1]
    fill!(res, zero(eltype(res)))
    res[1:n-1] .= g
    res[2:n] .-= g
end

function grad_t!(res::T, g::U, shape::NTuple{2,Int64}, dim::Int64) where {T<:AbstractVector,U<:AbstractVector}
    fill!(res, zero(eltype(res)))
    out = reshape(res, shape)
    if dim == 1
        gm = reshape(g, shape[1]-1, shape[2])
        out[1:end-1, :] .= gm
        out[2:end, :] .-= gm
    else
        gm = reshape(g, shape[1], shape[2]-1)
        out[:, 1:end-1] .= gm
        out[:, 2:end] .-= gm
    end
end
# 3d adjoint: add g into the leading slab and subtract it from the trailing
# slab along dimension `dim`
function grad_t!(res::T, g::U, shape::NTuple{3,Int64}, dim::Int64) where {T<:AbstractVector,U<:AbstractVector}
    res .= zero(eltype(g))
    res_ = reshape(res,shape)
    if dim==1
        g = reshape(g,shape[1]-1,shape[2],shape[3])
        res_[1:shape[1]-1,:,:] .= g
        res_[2:shape[1],:,:] .-= g
    elseif dim==2
        g = reshape(g,shape[1],shape[2]-1,shape[3])
        res_[:,1:shape[2]-1,:] .= g
        res_[:,2:shape[2],:] .-= g
    else
        g = reshape(g,shape[1],shape[2],shape[3]-1)
        res_[:,:,1:shape[3]-1] .= g
        res_[:,:,2:shape[3]] .-= g
    end
end | SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 1359 | export NormalOp, normalOperator
# Normal operator A' W A, built lazily from a parent operator A and a
# weighting operator W. Field layout mirrors LinearOperators.LinearOperator.
mutable struct NormalOp{T,S,D,V} <: AbstractLinearOperator{T}
    nrow :: Int
    ncol :: Int
    symmetric :: Bool
    hermitian :: Bool
    prod! :: Function      # computes res = A' W A x
    tprod! :: Nothing
    ctprod! :: Nothing
    nprod :: Int
    ntprod :: Int
    nctprod :: Int
    args5 :: Bool
    use_prod5! :: Bool
    allocated5 :: Bool
    Mv5 :: Vector{T}
    Mtu5 :: Vector{T}
    parent::S              # the operator A
    weights::D             # the weighting operator W
    tmp::V                 # scratch vector of length size(A, 1)
end

LinearOperators.storage_type(op::NormalOp) = typeof(op.Mv5)

# Convenience constructor: allocate the scratch vector in the promoted
# element type of parent and weights.
function NormalOp(parent, weights)
    T = promote_type(eltype(parent), eltype(weights))
    tmp = Vector{T}(undef, size(parent, 1))
    return NormalOp(parent, weights, tmp)
end

function NormalOp(parent, weights, tmp::Vector{T}) where T
    # NOTE(review): produ! receives `parent`/`tmp` as parameters but captures
    # `weights` from the enclosing scope — works, but inconsistent.
    function produ!(y, parent, tmp, x)
        mul!(tmp, parent, x)
        mul!(tmp, weights, tmp) # This can be dangerous. We might need to create two tmp vectors
        return mul!(y, adjoint(parent), tmp)
    end

    return NormalOp(size(parent,2), size(parent,2), false, false
             , (res,x) -> produ!(res, parent, tmp, x)
             , nothing
             , nothing
             , 0, 0, 0, false, false, false, T[], T[]
             , parent, weights, tmp)
end

# Copies the parent operator and scratch vector; `weights` is shared, not copied.
function Base.copy(S::NormalOp)
    return NormalOp(copy(S.parent), S.weights, copy(S.tmp))
end

"""
    normalOperator(parent, weights=opEye(eltype(parent), size(parent,1)))

builds the normal operator `parent' * weights * parent`; `weights` defaults
to the identity.
"""
function normalOperator(parent, weights=opEye(eltype(parent), size(parent,1)))
    return NormalOp(parent, weights)
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 1112 | export vectorizePattern, SamplingOp
"""
Convert first-dimension sampling indices `idx` of a multidimensional array of
size `shape` into the equivalent linear (vector) indices.
"""
function vectorizePattern(idx::T, shape::Tuple) where T<:AbstractArray{Int}
    stride = size(idx, 1)
    return [div(i - 1, stride) * shape[1] + idx[i] for i in 1:length(idx)]
end
"""
    SamplingOp(pattern::Array{Int}, shape::Tuple)

builds a `LinearOperator` which only returns the vector elements at positions
indicated by pattern.

# Arguments
* `pattern::Array{Int}` - indices to sample
* `shape::Tuple` - size of the array to sample
"""
function SamplingOp(pattern::T, shape::Tuple, type::Type=ComplexF64) where T<:AbstractArray{Int}
    # multidimensional patterns are first converted to linear indices
    ndims(pattern)>1 ? idx = vectorizePattern(pattern, shape) : idx = pattern
    return opEye(type,length(idx))*opRestriction(idx, prod(shape))
end
"""
    SamplingOp(pattern::AbstractArray{Bool})

builds a `LinearOperator` which masks an input vector element-wise with the
boolean `pattern`: entries where `pattern` is `false` are zeroed out.
"""
function SamplingOp(pattern::T) where T<:AbstractArray{Bool}
    function prod!(res::Vector{U}, x::Vector{V}) where {U,V}
        res .= pattern.*x
    end
    # Fix: the operator's element type must be the *element* type of the
    # pattern, not the array type `T` itself.
    return LinearOperator(eltype(pattern), length(pattern), length(pattern), true, false, prod!)
end | SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 2051 | module SparsityOperators
import Base: length, iterate, \
using LinearAlgebra
import LinearAlgebra.BLAS: gemv, gemv!
import LinearAlgebra: BlasFloat, normalize!, norm, rmul!, lmul!
using SparseArrays
using Random
#using CUDA
using Reexport
@reexport using Reexport
@reexport using LinearOperators
@reexport using FFTW
@reexport using Wavelets
# Treat `opEye` as a 3-args (non-5-args) operator so products with it dispatch
# through the simpler multiplication path.
# NOTE(review): these extend LinearOperators methods on a LinearOperators-owned
# type (type piracy) — confirm this workaround is still needed.
LinearOperators.use_prod5!(op::opEye) = false
LinearOperators.has_args5(op::opEye) = false

# Union aliases for optional transforms/functions used throughout the package.
const Trafo = Union{AbstractMatrix, AbstractLinearOperator, Nothing}
const FuncOrNothing = Union{Function, Nothing}
# Helper function to wrap a 1-arg `prod` function into a 5-args mul! kernel
# computing res = α*prod(x) + β*res. The β == 0 branch avoids reading a
# possibly uninitialized `res`. (Restores the λ/α/β identifiers that were
# mojibake-garbled in the checked-in file.)
function wrapProd(prod::Function)
  λ = (res, x, α, β) -> begin
    if β == zero(β)
      res .= prod(x) .* α
    else
      res .= prod(x) .* α .+ β .* res
    end
  end
  return λ
end
include("FFTOp.jl")
include("DCTOp.jl")
include("DSTOp.jl")
include("WaveletOp.jl")
include("GradientOp.jl")
include("SamplingOp.jl")
include("WeightingOp.jl")
include("NormalOp.jl")
export linearOperator, linearOperatorList
# No transform requested: no operator is constructed.
linearOperator(op::Nothing,shape,T::Type=ComplexF32) = nothing

"""
returns a list of currently implemented `LinearOperator`s
"""
function linearOperatorList()
  return ["DCT-II", "DCT-IV", "FFT", "DST", "Wavelet", "Gradient"]
end

"""
    linearOperator(op::AbstractString, shape, T::Type=ComplexF32)

returns the `LinearOperator` with name `op`.

# valid names

* `"FFT"`
* `"DCT-II"`
* `"DCT-IV"`
* `"DST"`
* `"Wavelet"`
* `"Gradient"`
"""
function linearOperator(op::AbstractString, shape, T::Type=ComplexF32)
  shape_ = tuple(shape...)
  if op == "FFT"
    trafo = FFTOp(T, shape_, false) #FFTOperator(shape)
  elseif op == "DCT-II"
    # the DCT operators require singleton dimensions to be dropped
    shape_ = tuple(shape[shape .!= 1]...)
    trafo = DCTOp(T, shape_, 2)
  elseif op == "DCT-IV"
    shape_ = tuple(shape[shape .!= 1]...)
    trafo = DCTOp(T, shape_, 4)
  elseif op == "DST"
    trafo = DSTOp(T, shape_)
  elseif op == "Wavelet"
    trafo = WaveletOp(T,shape_)
  elseif op=="Gradient"
    trafo = GradientOp(T,shape_)
  else
    error("Unknown transformation")
  end
  trafo
end
end # module
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 582 | export WaveletOp
"""
    WaveletOp(T::Type, shape, wt=wavelet(WT.db2))

returns a `WaveletOp <: AbstractLinearOperator`, which performs a Wavelet
transform on a given input array.

# Arguments

* `T::Type`              - element type of the operator
* `shape`                - size of the Array to transform
* (`wt=wavelet(WT.db2)`) - Wavelet to apply
"""
function WaveletOp(T::Type, shape, wt=wavelet(WT.db2))
  # dwt!/idwt! operate on arrays, so the flat vectors are reshaped to `shape`
  return LinearOperator(T, prod(shape), prod(shape), false, false
            , (res,x)->dwt!(reshape(res,shape), reshape(x,shape), wt)
            , nothing
            , (res,x)->idwt!(reshape(res,shape), reshape(x,shape), wt) )
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 454 | export WeightingOp
"""
    WeightingOp(weights::T, rep::Int=1) where T<:AbstractVector

generates a `LinearOperator` which multiplies an input vector index-wise with `weights`

# Arguments
* `weights` - weights vector
* `rep::Int=1` - number of sub-arrays that need to be multiplied with `weights`
"""
function WeightingOp(weights::T, rep::Int=1) where T<:AbstractVector
# Tile the weights `rep` times so each stacked sub-array is scaled
# element-wise, then wrap as a diagonal operator.
weights_cat = repeat(weights,rep)
return opDiagonal(weights_cat)
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 135 | using SparsityOperators
# Test entry point: load the test dependencies and run every test file.
using Test
using Random
using LinearAlgebra
using FFTW
include("testNormalOp.jl")
include("testOperators.jl")
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 290 |
# Check that `normalOperator(A[, W])` matches the explicit normal matrix
# A' * W * A (and A' * A without weights) applied to a random vector.
@info "test normal operator"
N = 512
Random.seed!(1234)
x = rand(N)
A = rand(N,N)
W = WeightingOp(rand(N))
y1 = adjoint(A)*W*A*x
y = normalOperator(A,W)*x
@test norm(y1 - y) / norm(y) ≈ 0 atol=0.01
y1 = adjoint(A)*A*x
y = normalOperator(A)*x
@test norm(y1 - y) / norm(y) ≈ 0 atol=0.01
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 4393 | function testDCT1d(N=32)
Random.seed!(1235)
# Random band-limited complex test signal.
x = zeros(ComplexF64, N^2)
for i=1:5
x .+= rand()*cos.(rand(1:N^2)*collect(1:N^2)) .+ 1im*rand()*cos.(rand(1:N^2)*collect(1:N^2))
end
# DCT-II operator vs. its explicit (orthonormalized) matrix.
D1 = DCTOp(ComplexF64,(N^2,),2)
D2 = sqrt(2/N^2)*[cos(pi/(N^2)*j*(k+0.5)) for j=0:N^2-1,k=0:N^2-1]
D2[1,:] .*= 1/sqrt(2)
# DCT-IV operator vs. its explicit matrix.
D3 = DCTOp(ComplexF64,(N^2,),4)
D4 = sqrt(2/N^2)*[cos(pi/(N^2)*(j+0.5)*(k+0.5)) for j=0:N^2-1,k=0:N^2-1]
y1 = D1*x
y2 = D2*x
@test norm(y1 - y2) / norm(y1) ≈ 0 atol=0.01
y3 = D3*x
y4 = D4*x
# NOTE(review): denominator uses norm(y1); possibly norm(y3) was intended.
@test norm(y3 - y4) / norm(y1) ≈ 0 atol=0.01
# Adjoints should invert the (orthogonal) transforms.
x1 = adjoint(D1)*y1
@test norm(x1 - x) / norm(x) ≈ 0 atol=0.01
x2 = adjoint(D3)*y3
@test norm(x2 - x) / norm(x) ≈ 0 atol=0.01
end
# Compare FFTOp on a 1d signal against an explicit unitary DFT matrix,
# with and without fftshift, for forward and adjoint application.
function testFFT1d(N=32,shift=true)
Random.seed!(1234)
x = zeros(N^2)
for i=1:5
x .+= rand()*cos.(rand(1:N^2)*collect(1:N^2))
end
D1 = FFTOp(ComplexF64,(N^2,),shift)
# Unitary DFT matrix of size N^2 (normalization 1/N = 1/sqrt(N^2)).
D2 = 1.0/N*[exp(-2*pi*im*j*k/N^2) for j=0:N^2-1,k=0:N^2-1]
y1 = D1*x
if shift
y2 = fftshift(D2*fftshift(x))
else
y2 = D2*x
end
@test norm(y1 - y2) / norm(y1) ≈ 0 atol=0.01
x1 = adjoint(D1) * y1
if shift
x2 = ifftshift(adjoint(D2)*ifftshift(y2))
else
x2 = adjoint(D2)*y2
end
@test norm(x1 - x2) / norm(x1) ≈ 0 atol=0.01
end
# Same as testFFT1d but for a 2d FFT: the reference matrix acts on the
# vectorized N×N array via linearized Cartesian indices.
function testFFT2d(N=32,shift=true)
Random.seed!(1234)
x = zeros(N^2)
for i=1:5
x .+= rand()*cos.(rand(1:N^2)*collect(1:N^2))
end
D1 = FFTOp(ComplexF64,(N,N),shift)
# Map linear indices to 2d indices to build the explicit 2d DFT matrix.
idx = CartesianIndices((N,N))[collect(1:N^2)]
D2 = 1.0/N*[ exp(-2*pi*im*((idx[j][1]-1)*(idx[k][1]-1)+(idx[j][2]-1)*(idx[k][2]-1))/N) for j=1:N^2, k=1:N^2 ]
y1 = D1*x
if shift
y2 = D2*vec(fftshift(reshape(x,N,N)))
y2 = vec(fftshift(reshape(y2,N,N)))
else
y2 = D2*x
end
@test norm(y1 - y2) / norm(y1) ≈ 0 atol=0.01
x1 = adjoint(D1) * y1
if shift
x2 = adjoint(D2)*vec(ifftshift(reshape(y2,N,N)))
x2 = vec(ifftshift(reshape(x2,N,N)))
else
x2 = adjoint(D2)*y2
end
@test norm(x1 - x2) / norm(x1) ≈ 0 atol=0.01
end
# WeightingOp should multiply element-wise by `weights`; with rep=2 it
# should apply the same weights to each of the two stacked sub-arrays.
function testWeighting(N=512)
Random.seed!(1234)
x1 = rand(N)
weights = rand(N)
W = WeightingOp(weights)
y1 = W*x1
y = weights .* x1
@test norm(y1 - y) / norm(y) ≈ 0 atol=0.01
x2 = rand(2*N)
W2 = WeightingOp(weights,2)
y2 = W2*x2
y = repeat(weights,2) .* x2
@test norm(y2 - y) / norm(y) ≈ 0 atol=0.01
end
# Check GradientOp on a 1d array against an explicit finite-difference
# matrix, for both the forward map and its transpose.
function testGradOp1d(N=512)
  x = rand(N)
  G = GradientOp(eltype(x),size(x))
  # Explicit (N-1)×N forward-difference matrix.
  G0 = Bidiagonal(ones(N),-ones(N-1), :U)[1:N-1,:]
  y = G*x
  y0 = G0*x
  @test norm(y - y0) / norm(y0) ≈ 0 atol=0.001
  xr = transpose(G)*y
  xr0 = transpose(G0)*y0
  # Bug fix: compare the transposed results (xr vs. xr0). Previously this
  # re-tested `y` against `y0`, leaving the transpose entirely unchecked.
  @test norm(xr - xr0) / norm(xr0) ≈ 0 atol=0.001
end
# Check GradientOp on a 2d array: the stacked result should equal the 1d
# difference applied along columns and rows, and likewise for the transpose.
function testGradOp2d(N=64)
x = repeat(1:N,1,N)
G = GradientOp(eltype(x),size(x))
G_1d = Bidiagonal(ones(N),-ones(N-1), :U)[1:N-1,:]
y = G*vec(x)
# Reference: column differences followed by row differences, vectorized.
y0 = vcat( vec(G_1d*x), vec(x*transpose(G_1d)) )
@test norm(y - y0) / norm(y0) ≈ 0 atol=0.001
xr = transpose(G)*y
y0_x = reshape(y0[1:N*(N-1)],N-1,N)
y0_y = reshape(y0[N*(N-1)+1:end],N,N-1)
xr0 = transpose(G_1d)*y0_x + y0_y*G_1d
xr0 = vec(xr0)
@test norm(xr - xr0) / norm(xr0) ≈ 0 atol=0.001
end
# SamplingOp should pick out the chosen entries (index- or mask-based),
# and its adjoint should scatter them back with zeros elsewhere.
function testSampling(N=64)
x = rand(ComplexF64,N,N)
# index-based sampling
idx = shuffle(collect(1:N^2)[1:N*div(N,2)])
SOp = SamplingOp(idx,(N,N))
y = SOp*vec(x)
x2 = adjoint(SOp)*y
# mask-based sampling
msk = zeros(Bool,N*N);msk[idx].=true
SOp2 = SamplingOp(msk)
y2 = ComplexF64.(SOp2*vec(x))
# references
y_ref = vec(x[idx])
x2_ref = zeros(ComplexF64,N^2)
x2_ref[idx] .= y_ref
# perform tests
@test norm(y - y_ref) / norm(y_ref) ≈ 0 atol=0.000001
@test norm(x2 - x2_ref) / norm(x2_ref) ≈ 0 atol=0.000001
@test norm(y2 - x2_ref) / norm(x2_ref) ≈ 0 atol=0.000001
end
# Round-trip test: adjoint(WaveletOp) should invert the forward transform,
# including for non-square (M≠N) inputs.
function testWavelet(M=64,N=60)
x = rand(M,N)
WOp = WaveletOp(Float64,(M,N))
x_wavelet = WOp*vec(x)
x_reco = reshape( adjoint(WOp)*x_wavelet, M, N)
@test norm(x_reco - x) / norm(x) ≈ 0 atol=0.001
end
# Run all operator tests over a range of sizes.
@testset "Linear Operators" begin
@info "test DCT-II and DCT-IV"
for N in [2,8,16,32]
testDCT1d(N)
end
@info "test FFT"
for N in [8,16,32]
testFFT1d(N,false)
testFFT1d(N,true)
testFFT2d(N,false)
testFFT2d(N,true)
end
@info "test Weighting"
testWeighting(512)
@info "test gradientOp"
testGradOp1d(512)
testGradOp2d(64)
@info "test sampling"
testSampling(64)
@info "test WaveletOp"
testWavelet(64,64)
testWavelet(64,60)
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 363 | ####
####
#### Coverage summary, printed as "(percentage) covered".
####
#### Useful for CI environments that just want a summary (e.g. a GitLab setup).
####
using Coverage
# Process coverage files from the repository root (two levels up).
cd(joinpath(@__DIR__, "..", "..")) do
covered_lines, total_lines = get_summary(process_folder())
percentage = covered_lines / total_lines * 100
println("($(percentage)%) covered")
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | code | 266 | # only push coverage from one bot
get(ENV, "TRAVIS_OS_NAME", nothing) == "linux" || exit(0)
get(ENV, "TRAVIS_JULIA_VERSION", nothing) == "1.1" || exit(0)
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
Codecov.submit(Codecov.process_folder())
end
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | docs | 772 | # SparsityOperators.jl
<!--




 -->
[](https://github.com/tknopp/SparsityOperators.jl/actions)
[](http://codecov.io/github/tknopp/SparsityOperators.jl?branch=master)
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.4.7 | a67f4a6361b37e452f9c56b62f53a66f9606e90f | docs | 48 | # SparsityOperators
*Documentation for SparsityOperators.jl, a collection of sparsifying transform operators (FFT, DCT, DST, Wavelet, Gradient, Sampling, Weighting) built on LinearOperators.jl.*
| SparsityOperators | https://github.com/tknopp/SparsityOperators.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 613 | using DataGraphs
# Documenter build script for the DataGraphs.jl docs.
using Documenter
# Make `using DataGraphs` implicit in all doctests.
DocMeta.setdocmeta!(DataGraphs, :DocTestSetup, :(using DataGraphs); recursive=true)
makedocs(;
modules=[DataGraphs],
authors="Matthew Fishman <[email protected]> and contributors",
repo="https://github.com/mtfishman/DataGraphs.jl/blob/{commit}{path}#{line}",
sitename="DataGraphs.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://mtfishman.github.io/DataGraphs.jl",
assets=String[],
),
pages=["Home" => "index.md"],
)
deploydocs(; repo="github.com/mtfishman/DataGraphs.jl", devbranch="main")
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 983 | using DataGraphs: DataGraph
# Example: DataGraph over a 1d grid with String vertex data and Symbol
# edge data; demonstrates `isassigned`, `has_edge`/`has_vertex`, and
# getting/setting data via vertices, `Edge`s, and pairs.
using Graphs: Edge, grid, has_edge, has_vertex
g = grid((4,))
dg = DataGraph(g; vertex_data_eltype=String, edge_data_eltype=Symbol)
# No data assigned yet, on any vertex or edge.
@show !isassigned(dg, Edge(1, 2))
@show !isassigned(dg, 1 => 2)
@show !isassigned(dg, Edge(1 => 2))
@show !isassigned(dg, 1 => 3)
@show !isassigned(dg, 1)
@show !isassigned(dg, 2)
@show !isassigned(dg, 3)
@show !isassigned(dg, 4)
@show has_edge(dg, 1, 2)
@show has_edge(dg, 1 => 2)
@show !has_edge(dg, 1, 3)
@show !has_edge(dg, 1 => 3)
@show has_vertex(dg, 1)
@show has_vertex(dg, 4)
@show !has_vertex(dg, 0)
@show !has_vertex(dg, 5)
# Assign vertex data.
dg[1] = "V1"
dg[2] = "V2"
dg[3] = "V3"
dg[4] = "V4"
@show isassigned(dg, 1)
@show dg[1] == "V1"
@show dg[2] == "V2"
@show dg[3] == "V3"
@show dg[4] == "V4"
# Assign edge data via pairs or explicit `Edge`s.
dg[1 => 2] = :E12
dg[2 => 3] = :E23
dg[Edge(3, 4)] = :E34
#@show isassigned(dg, (1, 2))
@show isassigned(dg, Edge(2, 3))
@show isassigned(dg, 3 => 4)
@show dg[Edge(1, 2)] == :E12
@show dg[2 => 3] == :E23
@show dg[3 => 4] == :E34
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 563 | using Graphs: edges, has_edge, has_vertex, ne, nv, vertices
# Example: disjoint union (⊔) of two data graphs over a named 2×2 grid;
# vertices of the union are tagged with the subgraph index (1 or 2).
using NamedGraphs.GraphsExtensions: ⊔
using NamedGraphs.NamedGraphGenerators: named_grid
using DataGraphs: DataGraph
g = DataGraph(named_grid((2, 2)); vertex_data_eltype=String, edge_data_eltype=String)
for v in vertices(g)
g[v] = "V$v"
end
for e in edges(g)
g[e] = "E$e"
end
gg = g ⊔ g
@show has_vertex(gg, ((1, 1), 1))
@show has_vertex(gg, ((1, 1), 2))
@show has_edge(gg, ((1, 1), 1) => ((1, 2), 1))
@show has_edge(gg, ((1, 1), 2) => ((1, 2), 2))
# The union has twice the vertices and edges of the original.
@show nv(gg) == 2nv(g)
@show ne(gg) == 2ne(g)
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 1267 | using DataGraphs: DataGraph
# Example: DataGraph over a NamedGraph with string vertex names; data is
# accessed by vertex name and by name pairs / `NamedEdge`s.
using Graphs: grid, has_edge, has_vertex
using NamedGraphs: NamedGraph, NamedEdge
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
dg = DataGraph(g; vertex_data_eltype=String, edge_data_eltype=Symbol)
@show has_vertex(dg, "A")
@show has_vertex(dg, "D")
# Broken, see https://github.com/andyferris/Dictionaries.jl/issues/143.
# @show !has_vertex(dg, 0)
# @show !has_vertex(dg, 5)
@show has_edge(dg, "A", "B")
@show has_edge(dg, "A" => "B")
@show !has_edge(dg, "A", "C")
@show !has_edge(dg, "A" => "C")
# Nothing assigned yet.
@show !isassigned(dg, "A")
@show !isassigned(dg, "B")
@show !isassigned(dg, "C")
@show !isassigned(dg, "D")
@show !isassigned(dg, NamedEdge("A", "B"))
@show !isassigned(dg, "A" => "B")
@show !isassigned(dg, NamedEdge("A" => "B"))
@show !isassigned(dg, "A" => "C")
dg["A"] = "V1"
dg["B"] = "V2"
dg["C"] = "V3"
dg["D"] = "V4"
# Error: does not have vertex
# dg[1] = "X"
@show isassigned(dg, "A")
@show dg["A"] == "V1"
@show dg["B"] == "V2"
@show dg["C"] == "V3"
@show dg["D"] == "V4"
dg["A" => "B"] = :E12
dg["B" => "C"] = :E23
dg[NamedEdge("C", "D")] = :E34
@show isassigned(dg, NamedEdge("B", "C"))
@show isassigned(dg, "C" => "D")
@show dg[NamedEdge("A", "B")] == :E12
@show dg["B" => "C"] == :E23
@show dg["C" => "D"] == :E34
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 537 | using DataGraphs: DataGraph
# Example: DataGraph over a named 2×2 grid with tuple vertices; edge data
# is shared between both orientations of an undirected edge.
using NamedGraphs: NamedEdge
using NamedGraphs.NamedGraphGenerators: named_grid
g = named_grid((2, 2))
dg = DataGraph(g; vertex_data_eltype=String, edge_data_eltype=String)
dg[1, 1] = "X11"
@show dg[1, 1] == "X11"
dg[(1, 1) => (1, 2)] = "X11↔X12"
@show dg[(1, 1) => (1, 2)] == "X11↔X12"
# Undirected: the reversed edge sees the same data.
@show dg[(1, 2) => (1, 1)] == "X11↔X12"
@show isassigned(dg, NamedEdge((1, 1), (1, 2)))
@show !isassigned(dg, NamedEdge((1, 1), (2, 2)))
@show isassigned(dg, (1, 1) => (1, 2))
@show !isassigned(dg, (1, 1) => (2, 2))
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 1104 | using DataGraphs: DataGraph
# Example: taking row/column subgraphs of a 2×2 grid DataGraph; subgraphs
# keep the data of the vertices and edges they retain.
using NamedGraphs.GraphsExtensions: subgraph
using NamedGraphs.NamedGraphGenerators: named_grid
using Graphs: ne, nv
g = named_grid((2, 2))
dg = DataGraph(g; vertex_data_eltype=String, edge_data_eltype=String)
dg[1, 1] = "V11"
dg[1, 2] = "V12"
dg[(1, 1) => (1, 2)] = "E11↔12"
@show dg[1, 1] == "V11"
@show dg[1, 2] == "V12"
@show isnothing(get(dg, (2, 1), nothing))
@show dg[(1, 1) => (1, 2)] == "E11↔12"
@show dg[(1, 2) => (1, 1)] == "E11↔12"
@show isnothing(get(dg, (1, 1) => (2, 1), nothing))
# Row/column subgraphs selected by a vertex predicate.
dg_1c = subgraph(v -> v[1] == 1, dg) # dg[1, :]
dg_2c = subgraph(v -> v[1] == 2, dg) # dg[2, :]
dg_c1 = subgraph(v -> v[2] == 1, dg) # dg[:, 1]
dg_c2 = subgraph(v -> v[2] == 2, dg) # dg[:, 2]
@show dg_1c[1, 1] == "V11"
@show dg_1c[1, 2] == "V12"
@show dg_1c[(1, 1) => (1, 2)] == "E11↔12"
@show nv(dg) == 4
@show ne(dg) == 4
@show nv(dg_1c) == 2
@show ne(dg_1c) == 1
@show nv(dg_2c) == 2
@show ne(dg_2c) == 1
@show nv(dg_c1) == 2
@show ne(dg_c1) == 1
@show nv(dg_c2) == 2
@show ne(dg_c2) == 1
# Subgraph on two non-adjacent vertices has no edges.
dg_nn = subgraph(dg, [(1, 1), (2, 2)])
@show nv(dg_nn) == 2
@show ne(dg_nn) == 0
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 273 | module DataGraphsGraphsFlowsExt
using DataGraphs: AbstractDataGraph, underlying_graph
using GraphsFlows: GraphsFlows
# Forward `mincut` to the underlying (data-free) graph; the data layer
# plays no role in the cut computation.
function GraphsFlows.mincut(graph::AbstractDataGraph, args...; kwargs...)
return GraphsFlows.mincut(underlying_graph(graph), args...; kwargs...)
end
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 1365 | module DataGraphsNamedGraphsExt
using DataGraphs: DataGraphs, AbstractDataGraph, underlying_graph
using NamedGraphs: NamedGraphs, AbstractNamedGraph
# Named graphs are valid underlying graphs for a DataGraph.
DataGraphs.is_underlying_graph(::Type{<:AbstractNamedGraph}) = true
# Forward NamedGraphs position queries to the underlying graph.
for f in [:(NamedGraphs.position_graph), :(NamedGraphs.vertex_positions)]
@eval begin
function $f(graph::AbstractDataGraph)
return $f(underlying_graph(graph))
end
end
end
using Graphs: edgetype, vertices
using NamedGraphs.OrdinalIndexing: OrdinalSuffixedInteger
# Ordinal indexing (e.g. `g[2nd]`): resolve the ordinal through the
# vertex list, then index the data graph by the resulting vertex.
# TODO: Define through some intermediate `to_vertex` function
# (analogous to Julia's `to_indices`) instead of through
# overloading `Base.getindex`.
function Base.getindex(graph::AbstractDataGraph, vertex::OrdinalSuffixedInteger)
return graph[vertices(graph)[vertex]]
end
function Base.getindex(
graph::AbstractDataGraph, edge::Pair{<:OrdinalSuffixedInteger,<:OrdinalSuffixedInteger}
)
return graph[edgetype(graph)(vertices(graph)[edge[1]], vertices(graph)[edge[2]])]
end
function Base.setindex!(graph::AbstractDataGraph, value, vertex::OrdinalSuffixedInteger)
graph[vertices(graph)[vertex]] = value
return graph
end
function Base.setindex!(
graph::AbstractDataGraph,
value,
edge::Pair{<:OrdinalSuffixedInteger,<:OrdinalSuffixedInteger},
)
graph[edgetype(graph)(vertices(graph)[edge[1]], vertices(graph)[edge[2]])] = value
return graph
end
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 459 | module DataGraphs
# Package sources, in dependency order.
include("utils.jl")
include("traits/isunderlyinggraph.jl")
include("abstractdatagraph.jl")
include("arrange.jl")
include("datagraph.jl")
# TODO: Turn into a weak dependency once `GraphsExtensions`
# is split off from `NamedGraphs`.
include("../ext/DataGraphsNamedGraphsExt/DataGraphsNamedGraphsExt.jl")
export AbstractDataGraph, DataGraph
using PackageExtensionCompat: @require_extensions
# Load package extensions on older Julia versions via the compat shim.
function __init__()
@require_extensions
end
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 13445 | using Dictionaries: set!, unset!
using Graphs:
Graphs, AbstractEdge, AbstractGraph, IsDirected, add_edge!, edges, ne, nv, vertices
using NamedGraphs.GraphsExtensions: GraphsExtensions, incident_edges, vertextype
using NamedGraphs.SimilarType: similar_type
using SimpleTraits: SimpleTraits, Not, @traitfn
# A graph with data attached to vertices (eltype `VD`) and edges (`ED`),
# layered on top of an underlying `AbstractGraph{V}`.
abstract type AbstractDataGraph{V,VD,ED} <: AbstractGraph{V} end

# Minimal interface: concrete subtypes must implement these.
# TODO: Define for `AbstractGraph` as a `DataGraphInterface`.
underlying_graph(::AbstractDataGraph) = not_implemented()
underlying_graph_type(::Type{<:AbstractDataGraph}) = not_implemented()
vertex_data(::AbstractDataGraph) = not_implemented()
vertex_data_eltype(::Type{<:AbstractDataGraph}) = not_implemented()
edge_data(::AbstractDataGraph) = not_implemented()
edge_data_eltype(::Type{<:AbstractDataGraph}) = not_implemented()

# Derived interface: defaults defined in terms of the minimal interface.
function Graphs.edgetype(graph_type::Type{<:AbstractDataGraph})
return Graphs.edgetype(underlying_graph_type(graph_type))
end
function Graphs.is_directed(graph_type::Type{<:AbstractDataGraph})
return Graphs.is_directed(underlying_graph_type(graph_type))
end
underlying_graph_type(graph::AbstractDataGraph) = typeof(underlying_graph(graph))
vertex_data_eltype(graph::AbstractDataGraph) = eltype(vertex_data(graph))
edge_data_eltype(graph::AbstractDataGraph) = eltype(edge_data(graph))
# TODO: delete, defined for AbstractGraph{V}?
function GraphsExtensions.vertextype(graph_type::Type{<:AbstractDataGraph})
return vertextype(underlying_graph_type(graph_type))
end
GraphsExtensions.vertextype(graph::AbstractDataGraph) = vertextype(typeof(graph))
# `zero` of a data graph type is an empty instance of that type.
Base.zero(graph_type::Type{<:AbstractDataGraph}) = graph_type()
# Graphs overloads
# Metaprogrammed forwarding: each listed Graphs/GraphsExtensions function
# is defined for data graphs by delegating to the underlying graph
# (these are pure graph-structure queries that ignore the data layer).
for f in [
:(Graphs.a_star),
:(Graphs.add_edge!),
:(Graphs.add_vertex!),
:(Graphs.adjacency_matrix),
:(Graphs.bellman_ford_shortest_paths),
:(Graphs.bfs_parents),
:(Graphs.bfs_tree),
:(Graphs.boruvka_mst),
:(Graphs.center),
:(Graphs.common_neighbors),
:(Graphs.connected_components),
:(Graphs.degree),
:(Graphs.degree_histogram),
:(Graphs.desopo_pape_shortest_paths),
:(Graphs.dfs_parents),
:(Graphs.dfs_tree),
:(Graphs.diameter),
:(Graphs.dijkstra_shortest_paths),
:(Graphs.eccentricity),
:(Graphs.edges),
:(Graphs.edgetype),
:(Graphs.eltype),
:(Graphs.enumerate_paths),
:(Graphs.floyd_warshall_shortest_paths),
:(Graphs.has_edge),
:(Graphs.has_path),
:(Graphs.has_vertex),
:(Graphs.inneighbors),
:(Graphs.is_connected),
:(Graphs.is_cyclic),
:(Graphs.is_directed),
:(Graphs.is_strongly_connected),
:(Graphs.is_weakly_connected),
:(Graphs.mincut),
:(Graphs.ne),
:(Graphs.neighbors),
:(Graphs.neighborhood),
:(Graphs.neighborhood_dists),
:(Graphs.johnson_shortest_paths),
:(Graphs.spfa_shortest_paths),
:(Graphs.yen_k_shortest_paths),
:(Graphs.kruskal_mst),
:(Graphs.prim_mst),
:(Graphs.nv),
:(Graphs.outneighbors),
:(Graphs.periphery),
:(Graphs.radius),
:(Graphs.steiner_tree),
:(Graphs.topological_sort_by_dfs),
:(Graphs.tree),
:(Graphs.vertices),
:(GraphsExtensions.boundary_edges),
:(GraphsExtensions.boundary_vertices),
:(GraphsExtensions.eccentricities),
:(GraphsExtensions.inner_boundary_vertices),
:(GraphsExtensions.mincut_partitions),
:(GraphsExtensions.outer_boundary_vertices),
:(GraphsExtensions.symrcm_perm),
:(GraphsExtensions.symrcm_permute),
]
@eval begin
function $f(graph::AbstractDataGraph, args...; kwargs...)
return $f(underlying_graph(graph), args...; kwargs...)
end
end
end
# More specific methods than the generic forwarding above, needed to
# resolve dispatch ambiguities with Graphs' own `AbstractGraph` methods.
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.degree(graph::AbstractDataGraph, vertex::Integer)
return Graphs.degree(underlying_graph(graph), vertex)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.dijkstra_shortest_paths(
graph::AbstractDataGraph, vertices::Vector{<:Integer}
)
return Graphs.dijkstra_shortest_paths(underlying_graph(graph), vertices)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.eccentricity(graph::AbstractDataGraph, distmx::AbstractMatrix)
return Graphs.eccentricity(underlying_graph(graph), distmx)
end
# Fix for ambiguity error with `AbstractGraph` version.
# Bug fix: these must extend `Graphs.indegree`/`Graphs.outdegree` (neither
# name is imported into this module). The previous unqualified definitions
# created new module-local functions whose bodies called themselves with
# the underlying graph, which has no matching method — a MethodError —
# while leaving the Graphs functions unextended.
function Graphs.indegree(graph::AbstractDataGraph, vertex::Integer)
  return Graphs.indegree(underlying_graph(graph), vertex)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.outdegree(graph::AbstractDataGraph, vertex::Integer)
  return Graphs.outdegree(underlying_graph(graph), vertex)
end
# A directed data graph is already its own directed version.
@traitfn GraphsExtensions.directed_graph(graph::AbstractDataGraph::IsDirected) = graph
# For an undirected data graph, build a directed version where each
# undirected edge becomes a pair of directed edges; vertex data is copied,
# and edge data is stored in both directions, reversed via
# `reverse_data_direction` for the opposite orientation.
@traitfn function GraphsExtensions.directed_graph(graph::AbstractDataGraph::(!IsDirected))
digraph = directed_graph(typeof(graph))(directed_graph(underlying_graph(graph)))
for v in vertices(graph)
# TODO: Only loop over `keys(vertex_data(graph))`
if isassigned(graph, v)
digraph[v] = graph[v]
end
end
for e in edges(graph)
# TODO: Only loop over `keys(edge_data(graph))`
# TODO: Are these needed?
add_edge!(digraph, e)
add_edge!(digraph, reverse(e))
if isassigned(graph, e)
digraph[e] = graph[e]
digraph[reverse(e)] = reverse_data_direction(graph, graph[e])
end
end
return digraph
end
"""
    reverse(graph::AbstractDataGraph)

Return a new data graph with every edge direction flipped. Vertex data is
carried over unchanged; the data of each edge is stored on the reversed edge.
"""
function Base.reverse(graph::AbstractDataGraph)
  out = typeof(graph)(reverse(underlying_graph(graph)))
  for vertex in vertices(graph)
    isassigned(graph, vertex) || continue
    out[vertex] = graph[vertex]
  end
  for edge in edges(graph)
    isassigned(graph, edge) || continue
    out[reverse(edge)] = graph[edge]
  end
  return out
end
# NOTE(review): both `merge_vertices` methods are unfinished stubs — they
# compute the merged underlying graph but then return `not_implemented()`,
# so the merge-data keyword arguments are currently unused.
function Graphs.merge_vertices(
graph::AbstractDataGraph,
merge_vertices;
merge_data=(x, y) -> y,
merge_vertex_data=merge_data,
merge_edge_data=merge_data,
kwargs...,
)
underlying_merged_graph = Graphs.merge_vertices(underlying_graph(graph); kwargs...)
return not_implemented()
end
function Graphs.merge_vertices!(
graph::AbstractDataGraph,
merge_vertices;
merge_data=(x, y) -> y,
merge_vertex_data=merge_data,
merge_edge_data=merge_data,
kwargs...,
)
underlying_merged_graph = copy(underlying_graph(graph))
Graphs.merge_vertices!(underlying_merged_graph; kwargs...)
return not_implemented()
end
# Union the vertices and edges of the graphs and
# merge the vertex and edge metadata.
# When a vertex/edge carries data in both graphs, the corresponding
# merge function combines them (default: keep the second graph's data).
function Base.union(
graph1::AbstractDataGraph,
graph2::AbstractDataGraph;
merge_data=(x, y) -> y,
merge_vertex_data=merge_data,
merge_edge_data=merge_data,
)
underlying_graph_union = union(underlying_graph(graph1), underlying_graph(graph2))
vertex_data_merge = mergewith(merge_vertex_data, vertex_data(graph1), vertex_data(graph2))
edge_data_merge = mergewith(merge_edge_data, edge_data(graph1), edge_data(graph2))
# TODO: Convert to `promote_type(typeof(graph1), typeof(graph2))`
return _DataGraph(underlying_graph_union, vertex_data_merge, edge_data_merge)
end
# Varargs version: fold the pairwise union over all graphs, left to right.
function Base.union(
graph1::AbstractDataGraph,
graph2::AbstractDataGraph,
graph3::AbstractDataGraph,
graphs_tail::AbstractDataGraph...;
kwargs...,
)
return union(union(graph1, graph2; kwargs...), graph3, graphs_tail...; kwargs...)
end
# Build a new data graph whose vertices are `f.(vertices(graph))`,
# carrying the vertex and edge data over to the renamed vertices/edges.
function GraphsExtensions.rename_vertices(f::Function, graph::AbstractDataGraph)
renamed_underlying_graph = GraphsExtensions.rename_vertices(f, underlying_graph(graph))
# TODO: Base the ouput type on `typeof(graph)`, for example:
# convert_vertextype(eltype(renamed_vertices), typeof(graph))(renamed_underlying_graph)
renamed_graph = DataGraph(
renamed_underlying_graph;
vertex_data_eltype=vertex_data_eltype(graph),
edge_data_eltype=edge_data_eltype(graph),
)
for v in keys(vertex_data(graph))
renamed_graph[f(v)] = graph[v]
end
for e in keys(edge_data(graph))
renamed_graph[GraphsExtensions.rename_vertices(f, e)] = graph[e]
end
return renamed_graph
end
# Remove a vertex: drop its data and the data of every incident edge
# (which disappear along with the vertex), then remove it structurally.
function Graphs.rem_vertex!(graph::AbstractDataGraph, vertex)
neighbor_edges = incident_edges(graph, vertex)
# unset!(vertex_data(graph), to_vertex(graph, vertex...))
unset!(vertex_data(graph), vertex)
for neighbor_edge in neighbor_edges
unset!(edge_data(graph), neighbor_edge)
end
Graphs.rem_vertex!(underlying_graph(graph), vertex)
return graph
end
# Remove an edge together with its data.
function Graphs.rem_edge!(graph::AbstractDataGraph, edge)
unset!(edge_data(graph), edge)
Graphs.rem_edge!(underlying_graph(graph), edge)
return graph
end
# More specific `Integer` methods to resolve dispatch ambiguities with
# the generic `AbstractGraph` methods in Graphs.
# Fix ambiguity with:
# Graphs.neighbors(graph::AbstractGraph, v::Integer)
function Graphs.neighbors(graph::AbstractDataGraph, v::Integer)
return Graphs.neighbors(underlying_graph(graph), v)
end
# Fix ambiguity with:
# Graphs.bfs_tree(graph::AbstractGraph, s::Integer; dir)
function Graphs.bfs_tree(graph::AbstractDataGraph, s::Integer; kwargs...)
return Graphs.bfs_tree(underlying_graph(graph), s; kwargs...)
end
# Fix ambiguity with:
# Graphs.dfs_tree(graph::AbstractGraph, s::Integer; dir)
function Graphs.dfs_tree(graph::AbstractDataGraph, s::Integer; kwargs...)
return Graphs.dfs_tree(underlying_graph(graph), s; kwargs...)
end
"""
    map_vertex_data(f, graph::AbstractDataGraph; vertices=nothing)

Return a copy of `graph` where `f` has been applied to the data of each
vertex (all vertices by default, or just the given `vertices`).
"""
function map_vertex_data(f, graph::AbstractDataGraph; vertices=nothing)
  mapped = copy(graph)
  for vertex in (isnothing(vertices) ? Graphs.vertices(graph) : vertices)
    mapped[vertex] = f(graph[vertex])
  end
  return mapped
end

"""
    map_edge_data(f, graph::AbstractDataGraph; edges=nothing)

Return a copy of `graph` where `f` has been applied to the data of each
edge that has data assigned (all edges by default, or just the given `edges`).
"""
function map_edge_data(f, graph::AbstractDataGraph; edges=nothing)
  mapped = copy(graph)
  for edge in (isnothing(edges) ? Graphs.edges(graph) : edges)
    isassigned(graph, edge) || continue
    mapped[edge] = f(graph[edge])
  end
  return mapped
end

"""
    map_data(f, graph::AbstractDataGraph; vertices=nothing, edges=nothing)

Apply `f` to both the vertex data and the (assigned) edge data,
returning a new graph.
"""
function map_data(f, graph::AbstractDataGraph; vertices=nothing, edges=nothing)
  return map_edge_data(f, map_vertex_data(f, graph; vertices); edges)
end
# Vertex data access: `graph[v]`, `get`, and `get!` delegate to the
# vertex-data dictionary.
function Base.getindex(graph::AbstractDataGraph, vertex)
return vertex_data(graph)[vertex]
end
function Base.get(graph::AbstractDataGraph, vertex, default)
return get(vertex_data(graph), vertex, default)
end
function Base.get!(graph::AbstractDataGraph, vertex, default)
return get!(vertex_data(graph), vertex, default)
end
# Edge data access: look up the edge in its canonical ("arranged")
# direction; if the queried edge is the reverse of the stored one, the
# returned data is flipped via `reverse_data_direction`.
function Base.getindex(graph::AbstractDataGraph, edge::AbstractEdge)
is_edge_arranged_ = is_edge_arranged(graph, edge)
data = edge_data(graph)[arrange(is_edge_arranged_, edge)]
return reverse_data_direction(is_edge_arranged_, graph, data)
end
# Support syntax `g[v1 => v2]`
function Base.getindex(graph::AbstractDataGraph, edge::Pair)
return graph[edgetype(graph)(edge)]
end
function Base.get(graph::AbstractDataGraph, edge::AbstractEdge, default)
is_edge_arranged_ = is_edge_arranged(graph, edge)
data = get(edge_data(graph), arrange(is_edge_arranged_, edge), default)
return reverse_data_direction(is_edge_arranged_, graph, data)
end
function Base.get(graph::AbstractDataGraph, edge::Pair, default)
return get(graph, edgetype(graph)(edge), default)
end
function Base.get!(graph::AbstractDataGraph, edge::AbstractEdge, default)
is_edge_arranged_ = is_edge_arranged(graph, edge)
data = get!(edge_data(graph), arrange(is_edge_arranged_, edge), default)
return reverse_data_direction(is_edge_arranged_, graph, data)
end
function Base.get!(graph::AbstractDataGraph, edge::Pair, default)
return get!(graph, edgetype(graph)(edge), default)
end
# Support syntax `g[1, 2] = g[(1, 2)]`
function Base.getindex(graph::AbstractDataGraph, i1, i2, i...)
return graph[(i1, i2, i...)]
end
# Whether data has been assigned for the given vertex.
Base.isassigned(graph::AbstractDataGraph, key) = isassigned(vertex_data(graph), key)
# Whether data has been assigned for the given edge, checked in its
# canonical (arranged) direction.
function Base.isassigned(graph::AbstractDataGraph, edge::AbstractEdge)
  return isassigned(edge_data(graph), arrange(graph, edge))
end
# Support `isassigned(g, v1 => v2)` by converting the pair to an edge.
function Base.isassigned(graph::AbstractDataGraph, edge::Pair)
  return isassigned(graph, edgetype(graph)(edge))
end
# Assign vertex data.
function Base.setindex!(graph::AbstractDataGraph, data, vertex)
set!(vertex_data(graph), vertex, data)
return graph
end
# Assign edge data: store under the canonical (arranged) edge direction;
# data given for the reverse direction is flipped via `reverse_data_direction`.
function Base.setindex!(graph::AbstractDataGraph, data, edge::AbstractEdge)
is_edge_arranged_ = is_edge_arranged(graph, edge)
arranged_edge = arrange(is_edge_arranged_, edge)
arranged_data = reverse_data_direction(is_edge_arranged_, graph, data)
set!(edge_data(graph), arranged_edge, arranged_data)
return graph
end
# Support syntax `g[v1 => v2] = data`.
function Base.setindex!(graph::AbstractDataGraph, data, edge::Pair)
graph[edgetype(graph)(edge)] = data
return graph
end
# Support syntax `g[1, 2] = g[(1, 2)]`
function Base.setindex!(graph::AbstractDataGraph, x, i1, i2, i...)
graph[(i1, i2, i...)] = x
return graph
end
# Induced subgraph on `subvertices`: take the structural subgraph of the
# underlying graph, then copy over the data of retained vertices/edges.
# Returns `(subgraph, vlist)` like the Graphs.jl version.
function Graphs.induced_subgraph(graph::AbstractDataGraph, subvertices)
underlying_subgraph, vlist = Graphs.induced_subgraph(underlying_graph(graph), subvertices)
subgraph = similar_type(graph)(underlying_subgraph)
for v in vertices(subgraph)
if isassigned(graph, v)
subgraph[v] = graph[v]
end
end
for e in edges(subgraph)
if isassigned(graph, e)
subgraph[e] = graph[e]
end
end
return subgraph, vlist
end
#
# Printing
#
# Multi-line display: vertices, edges, then the vertex and edge data
# dictionaries.
function Base.show(io::IO, mime::MIME"text/plain", graph::AbstractDataGraph)
println(io, "$(typeof(graph)) with $(nv(graph)) vertices:")
show(io, mime, vertices(graph))
println(io, "\n")
println(io, "and $(ne(graph)) edge(s):")
for e in edges(graph)
show(io, mime, e)
println(io)
end
println(io)
println(io, "with vertex data:")
show(io, mime, vertex_data(graph))
println(io)
println(io)
println(io, "and edge data:")
show(io, mime, edge_data(graph))
return nothing
end
# Single-argument `show` reuses the text/plain form.
Base.show(io::IO, graph::AbstractDataGraph) = show(io, MIME"text/plain"(), graph)
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 2426 | using Graphs: IsDirected, src, dst
using SimpleTraits: SimpleTraits, Not, @traitfn
# TODO: Use a function `arrange` like in MetaGraphsNext:
# https://github.com/JuliaGraphs/MetaGraphsNext.jl/blob/1539095ee6088aba0d5b1cb057c339ad92557889/src/metagraph.jl#L75-L80
# to sort the vertices, only directed graphs should store data
# in both edge directions. Also, define `reverse_data_direction` as a function
# stored in directed AbstractDataGraph types (which by default returns nothing,
# indicating not to automatically store data in both directions)
# TODO: Use `Graphs.is_ordered`? https://juliagraphs.org/Graphs.jl/v1.7/core_functions/core/#Graphs.is_ordered-Tuple{AbstractEdge}
# Canonical ordering predicate for vertices, used to pick the stored
# direction of an undirected edge: prefer `isless` when defined for the
# pair, otherwise fall back to ordering by hash.
function is_arranged(src, dst)
  hasmethod(isless, typeof.((src, dst))) || return is_arranged_by_hash(src, dst)
  return isless(src, dst)
end

# Hash-based fallback ordering. A hash collision between distinct
# vertices makes the ordering ill-defined, so warn when detected.
function is_arranged_by_hash(src, dst)
  hash_src = hash(src)
  hash_dst = hash(dst)
  if hash_src == hash_dst && src != dst
    @warn "Hash collision when arranging vertices to extract edge data. Setting or extracting data may be ill-defined."
  end
  return isless(hash_src, hash_dst)
end

# Lexicographic arrangement for tuple vertices, mirroring tuple `isless`
# but comparing elements with `is_arranged`.
# https://github.com/JuliaLang/julia/blob/v1.8.5/base/tuple.jl#L470-L482
is_arranged(::Tuple{}, ::Tuple{}) = false
is_arranged(::Tuple{}, ::Tuple) = true
is_arranged(::Tuple, ::Tuple{}) = false
function is_arranged(t1::Tuple, t2::Tuple)
  head1, head2 = t1[1], t2[1]
  if is_arranged(head1, head2)
    return true
  end
  return isequal(head1, head2) && is_arranged(Base.tail(t1), Base.tail(t2))
end
# In a directed graph every edge is stored as given.
@traitfn function is_edge_arranged(graph::AbstractDataGraph::IsDirected, src, dst)
return true
end
# In an undirected graph an edge is "arranged" when src/dst are in
# canonical order (see `is_arranged`); data is stored under that direction.
@traitfn function is_edge_arranged(graph::AbstractDataGraph::(!IsDirected), src, dst)
return is_arranged(src, dst)
end
function is_edge_arranged(graph::AbstractDataGraph, edge::AbstractEdge)
return is_edge_arranged(graph, src(edge), dst(edge))
end
# Return the edge in its canonical storage direction.
function arrange(graph::AbstractDataGraph, edge::AbstractEdge)
return arrange(is_edge_arranged(graph, edge), edge)
end
function arrange(is_arranged::Bool, edge::AbstractEdge)
return is_arranged ? edge : reverse(edge)
end
# TODO: Store `reverse_data_direction` inside `AbstractDataGraph`
# to control data direction reversal by instance instead of
# just by type.
# Default: edge data is direction-agnostic (identity reversal).
reverse_data_direction(graph::AbstractDataGraph, data) = data
function reverse_data_direction(is_arranged::Bool, graph::AbstractDataGraph, data)
return is_arranged ? data : reverse_data_direction(graph, data)
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 4235 | using Dictionaries: Dictionary
using Graphs: Graphs, edgetype
using Graphs.SimpleGraphs: SimpleGraph
using NamedGraphs.GraphsExtensions: convert_vertextype, directed_graph, vertextype
# TODO: define VertexDataGraph, a graph with only data on the
# vertices, and EdgeDataGraph, a graph with only data on the edges.
# TODO: Use https://github.com/vtjnash/ComputedFieldTypes.jl to
# automatically determine `E` from `G` from `edgetype(G)`
# and `V` from `G` as `vertextype(G)`.
"""
    DataGraph{V,VD,ED,G,E}

A graph that attaches data to the vertices and edges of an underlying graph of
type `G` with vertex type `V` and edge type `E`. Vertex data has element type
`VD` and edge data has element type `ED`; both are stored sparsely in
`Dictionaries.Dictionary` objects keyed by vertex and by edge, respectively.
"""
struct DataGraph{V,VD,ED,G<:AbstractGraph,E<:AbstractEdge} <: AbstractDataGraph{V,VD,ED}
underlying_graph::G
vertex_data::Dictionary{V,VD}
edge_data::Dictionary{E,ED}
# Inner constructor exposed as the module-global `_DataGraph`: it derives all
# type parameters from the arguments themselves instead of trusting
# caller-supplied parameters.
global function _DataGraph(
underlying_graph::AbstractGraph, vertex_data::Dictionary, edge_data::Dictionary
)
return new{
vertextype(underlying_graph),
eltype(vertex_data),
eltype(edge_data),
typeof(underlying_graph),
edgetype(underlying_graph),
}(
underlying_graph, vertex_data, edge_data
)
end
end
# Type-level accessors: recover the underlying graph type and the vertex/edge
# data element types from a `DataGraph` type's field types.
underlying_graph_type(G::Type{<:DataGraph}) = fieldtype(G, :underlying_graph)
vertex_data_eltype(G::Type{<:DataGraph}) = eltype(fieldtype(G, :vertex_data))
edge_data_eltype(G::Type{<:DataGraph}) = eltype(fieldtype(G, :edge_data))
# Instance-level field accessors; `getfield` bypasses any `getproperty`
# overloads defined for `DataGraph`.
underlying_graph(graph::DataGraph) = getfield(graph, :underlying_graph)
vertex_data(graph::DataGraph) = getfield(graph, :vertex_data)
edge_data(graph::DataGraph) = getfield(graph, :edge_data)
# TODO: Is this needed? Maybe define a generic `AbstractDataGraph` version.
# A `DataGraph` is directed exactly when its underlying graph type is directed.
Graphs.is_directed(G::Type{<:DataGraph}) = Graphs.is_directed(underlying_graph_type(G))
# TODO: Implement in terms of `set_underlying_graph`, `set_vertex_data`, etc.
# TODO: Use `https://github.com/JuliaObjects/Accessors.jl`?
"""
    Base.copy(graph::DataGraph)

Create a copy of `graph`, copying the underlying graph as well as the vertex
and edge data dictionaries.

The dictionaries must be copied explicitly since their keys are otherwise
shared, see: https://github.com/andyferris/Dictionaries.jl/issues/98
"""
function Base.copy(graph::DataGraph)
    copied_graph = copy(underlying_graph(graph))
    copied_vertex_data = copy(vertex_data(graph))
    copied_edge_data = copy(edge_data(graph))
    return _DataGraph(copied_graph, copied_vertex_data, copied_edge_data)
end
"""
    DataGraph{V}(underlying_graph::AbstractGraph; vertex_data_eltype=Any, edge_data_eltype=Any)

Construct a `DataGraph` with vertex type `V` wrapping `underlying_graph`
(converted to vertex type `V`), with empty vertex and edge data dictionaries
of the requested element types.
"""
function DataGraph{V}(
underlying_graph::AbstractGraph; vertex_data_eltype::Type=Any, edge_data_eltype::Type=Any
) where {V}
    graph = convert_vertextype(V, underlying_graph)
    vdata = Dictionary{vertextype(graph),vertex_data_eltype}()
    edata = Dictionary{edgetype(graph),edge_data_eltype}()
    return _DataGraph(graph, vdata, edata)
end
# Infer the vertex type from the input graph and delegate to the vertex-typed
# constructor, forwarding the data eltype keyword arguments unchanged.
function DataGraph(underlying_graph::AbstractGraph; kwargs...)
    V = vertextype(underlying_graph)
    return DataGraph{V}(underlying_graph; kwargs...)
end
"""
    DataGraph{V,VD,ED,G,E}(underlying_graph::AbstractGraph)

Construct a fully type-parametrized `DataGraph` from `underlying_graph`,
converting it to underlying graph type `G`, with empty vertex and edge data
dictionaries. The edge type of `underlying_graph` must match the requested
edge type `E`.

Throws an `ArgumentError` if the edge types do not match.
"""
function DataGraph{V,VD,ED,G,E}(underlying_graph::AbstractGraph) where {V,VD,ED,G,E}
  # Use an explicit check instead of `@assert`: `@assert` is meant for internal
  # invariants and may be elided at higher optimization levels, which would
  # silently skip this input validation.
  edgetype(underlying_graph) === E || throw(
    ArgumentError(
      "Edge type of the input graph ($(edgetype(underlying_graph))) must match the requested edge type ($E).",
    ),
  )
  return _DataGraph(convert(G, underlying_graph), Dictionary{V,VD}(), Dictionary{E,ED}())
end
# Type conversions
# "Converting" a `DataGraph` to a type it already satisfies is just a copy;
# these methods short-circuit the conversion constructors below.
DataGraph{V,VD,ED,G}(graph::DataGraph{V,VD,ED,G}) where {V,VD,ED,G} = copy(graph)
DataGraph{V,VD,ED}(graph::DataGraph{V,VD,ED}) where {V,VD,ED} = copy(graph)
DataGraph{V,VD}(graph::DataGraph{V,VD}) where {V,VD} = copy(graph)
DataGraph{V}(graph::DataGraph{V}) where {V} = copy(graph)
# Convert a `DataGraph` to a new vertex type `V`: convert the underlying
# graph, rekey the vertex data, and rebuild the edge data with the converted
# graph's edge type.
function DataGraph{V}(graph::DataGraph) where {V}
# TODO: Make sure this properly copies
converted_underlying_graph = convert_vertextype(V, underlying_graph(graph))
converted_vertex_data = Dictionary{V}(vertex_data(graph))
# This doesn't convert properly.
# converted_edge_data = Dictionary{edgetype(converted_underlying_graph)}(edge_data(graph))
# Workaround: broadcast the new edge type's constructor over the old edge keys
# and rebuild the dictionary from the converted keys and the original values.
converted_edge_data = Dictionary(
edgetype(converted_underlying_graph).(keys(edge_data(graph))), values(edge_data(graph))
)
return _DataGraph(converted_underlying_graph, converted_vertex_data, converted_edge_data)
end
# Converting to the vertex type a data graph already has is a no-op.
GraphsExtensions.convert_vertextype(::Type{V}, graph::DataGraph{V}) where {V} = graph
# Otherwise, rebuild the data graph with the requested vertex type.
function GraphsExtensions.convert_vertextype(new_vertextype::Type, graph::DataGraph)
    return DataGraph{new_vertextype}(graph)
end
# TODO: implement generic version in terms of `set_underlying_graph_type`.
"""
    GraphsExtensions.directed_graph_type(graph_type::Type{<:DataGraph})

Return the `DataGraph` type corresponding to `graph_type` but with a directed
underlying graph type, preserving the vertex type, vertex/edge data element
types, and edge type.
"""
function GraphsExtensions.directed_graph_type(graph_type::Type{<:DataGraph})
  return DataGraph{
    vertextype(graph_type),
    vertex_data_eltype(graph_type),
    # Bug fix: this slot is the edge data element type `ED` of
    # `DataGraph{V,VD,ED,G,E}`; previously `edgetype(graph_type)` was passed
    # here, leaking the edge type into the edge *data* parameter.
    edge_data_eltype(graph_type),
    directed_graph_type(underlying_graph_type(graph_type)),
    edgetype(graph_type),
  }
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 45 | not_implemented() = error("Not implemented")
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 368 | using SimpleTraits: SimpleTraits, Not, @traitdef, @traitimpl
# Trait marking graph types that can serve as the underlying graph of a
# `DataGraph`; membership is decided by the `is_underlying_graph` predicate.
@traitdef IsUnderlyingGraph{X}
#! format: off
@traitimpl IsUnderlyingGraph{X} <- is_underlying_graph(X)
#! format: on
using Graphs: AbstractGraph
# By default an `AbstractGraph` subtype is not assumed to be usable as an
# underlying graph; specific graph families opt in explicitly.
function is_underlying_graph(::Type{<:AbstractGraph})
    return false
end
using Graphs.SimpleGraphs: AbstractSimpleGraph
# Simple graphs from `Graphs.SimpleGraphs` are valid underlying graphs.
function is_underlying_graph(::Type{<:AbstractSimpleGraph})
    return true
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | code | 12841 | @eval module $(gensym())
# Test suite for DataGraphs.jl. The enclosing `@eval module $(gensym())`
# wrapper (opened just above) gives the tests a fresh, uniquely-named module
# so repeated `include`s do not clash.
# NOTE(review): the non-ASCII operator `β` below appears to be a mojibake of
# `⊔` (disjoint union) and `∈` (set membership) — confirm against upstream.
using DataGraphs:
DataGraphs,
DataGraph,
edge_data,
edge_data_eltype,
is_arranged,
vertex_data,
vertex_data_eltype
using Dictionaries: AbstractIndices, Dictionary, Indices, dictionary
using Graphs:
add_edge!,
bfs_tree,
connected_components,
degree,
dfs_tree,
dijkstra_shortest_paths,
dst,
edges,
edgetype,
grid,
has_edge,
has_vertex,
indegree,
ne,
nv,
outdegree,
path_graph,
src,
vertices
using Graphs.SimpleGraphs: SimpleDiGraph, SimpleEdge, SimpleGraph
using GraphsFlows: GraphsFlows
using NamedGraphs: NamedDiGraph, NamedEdge, NamedGraph
using NamedGraphs.GraphsExtensions: β, rename_vertices, vertextype
using NamedGraphs.NamedGraphGenerators: named_grid, named_path_graph
using NamedGraphs.OrdinalIndexing: nd, st, rd, th
using Test: @test, @test_broken, @testset
# NOTE(review): duplicate of the `is_arranged` import above; harmless but
# removable.
using DataGraphs: is_arranged
@testset "DataGraphs.jl" begin
# Run every example script shipped with the package, suppressing its output.
@eval module $(gensym())
using DataGraphs: DataGraphs
using Suppressor: @suppress
using Test: @testset
@testset "Examples" begin
examples_path = joinpath(pkgdir(DataGraphs), "examples")
@testset "Run examples: $filename" for filename in readdir(examples_path)
if endswith(filename, ".jl")
@suppress include(joinpath(examples_path, filename))
end
end
end
end
# Exercise the canonical-ordering predicate on numbers, arrays, tuples, and
# mixed types; `is_arranged` must be a strict order (asymmetric).
@testset "is_arranged" begin
for (a, b) in [
(1, 2),
([1], [2]),
([1, 2], [2, 1]),
([1, 2], [2]),
([2], [2, 1]),
((1,), (2,)),
((1, 2), (2, 1)),
((1, 2), (2,)),
((2,), (2, 1)),
("X", 1),
(("X",), (1, 2)),
]
@test is_arranged(a, b)
@test !is_arranged(b, a)
end
end
# Core get/set behavior for vertex and edge data, including mixed vertex
# types and vertex-type conversion.
@testset "Basics" begin
g = grid((4,))
dg = DataGraph(g; vertex_data_eltype=String, edge_data_eltype=Symbol)
@test !isassigned(dg, SimpleEdge(1, 2))
@test !isassigned(dg, 1 => 2)
@test !isassigned(dg, SimpleEdge(1 => 2))
@test !isassigned(dg, 1 => 3)
@test !isassigned(dg, 1)
@test !isassigned(dg, 2)
@test !isassigned(dg, 3)
@test !isassigned(dg, 4)
@test degree(g, 1) == 1
@test indegree(g, 1) == 1
@test outdegree(g, 1) == 1
@test degree(g, 2) == 2
@test indegree(g, 2) == 2
@test outdegree(g, 2) == 2
@test has_edge(dg, 1, 2)
@test has_edge(dg, 1 => 2)
@test !has_edge(dg, 1, 3)
@test !has_edge(dg, 1 => 3)
@test has_vertex(dg, 1)
@test has_vertex(dg, 4)
@test !has_vertex(dg, 0)
@test !has_vertex(dg, 5)
dg[1] = "V1"
dg[2] = "V2"
dg[3] = "V3"
dg[4] = "V4"
@test isassigned(dg, 1)
@test dg[1] == "V1"
@test dg[2] == "V2"
@test dg[3] == "V3"
@test dg[4] == "V4"
dg[1 => 2] = :E12
dg[2 => 3] = :E23
dg[SimpleEdge(3, 4)] = :E34
#@test isassigned(dg, (1, 2))
@test isassigned(dg, SimpleEdge(2, 3))
@test isassigned(dg, 3 => 4)
@test dg[SimpleEdge(1, 2)] == :E12
@test dg[2 => 3] == :E23
@test dg[3 => 4] == :E34
# Regression test
# g = NamedGraph([(1, 1), (1, (1, 1))])
dg = DataGraph(NamedGraph([(1, 1), (1, (1, 1))]))
dg[(1, 1) => (1, (1, 1))] = "X"
@test dg[(1, 1) => (1, (1, 1))] == "X"
vdata = map(v -> "V$v", Indices(1:4))
edata = map(e -> "E$(src(e))$(dst(e))", Indices(SimpleEdge.([1 => 2, 2 => 3, 3 => 4])))
# TODO: Make a more compact constructor that directly accepts
# vertex and edge data? Maybe `DataGraph(g; vertex_data=vdata, edge_data=edata)`
# or `DataGraph(g; vertex_data=v -> "V$v", edge_data=e -> "E$(src(e))$(dst(e))")`.
dg = DataGraph(g; vertex_data_eltype=eltype(vdata), edge_data_eltype=eltype(edata))
for v in vertices(dg)
dg[v] = vdata[v]
end
for e in edges(dg)
dg[e] = edata[e]
end
@test dg[1] == "V1"
@test dg[2] == "V2"
@test dg[3] == "V3"
@test dg[4] == "V4"
@test dg[1 => 2] == "E12"
@test dg[2 => 3] == "E23"
@test dg[3 => 4] == "E34"
@test DataGraph(g) isa DataGraph{Int,Any,Any,SimpleGraph{Int},SimpleEdge{Int}}
# Vertex-type conversion should preserve all vertex and edge data.
dg_uint16 = DataGraph{UInt16}(dg)
@test dg_uint16 isa
DataGraph{UInt16,String,String,SimpleGraph{UInt16},SimpleEdge{UInt16}}
@test vertextype(dg_uint16) === UInt16
@test edgetype(dg_uint16) === SimpleEdge{UInt16}
@test vertex_data_eltype(dg_uint16) === String
@test edge_data_eltype(dg_uint16) === String
@test dg_uint16[1] == "V1"
@test dg_uint16[2] == "V2"
@test dg_uint16[3] == "V3"
@test dg_uint16[4] == "V4"
@test dg_uint16[1 => 2] == "E12"
@test dg_uint16[2 => 3] == "E23"
@test dg_uint16[3 => 4] == "E34"
# Vertices with mixed types
dg = DataGraph(NamedGraph(grid((4,)), [1, "X", 2, "Y"]))
@test nv(dg) == 4
@test ne(dg) == 3
dg[1] = "vertex_1"
dg["X"] = "vertex_X"
dg[2] = "vertex_2"
dg["Y"] = "vertex_Y"
@test dg[1] == "vertex_1"
@test dg["X"] == "vertex_X"
@test dg[2] == "vertex_2"
@test dg["Y"] == "vertex_Y"
dg[1 => "X"] = "edge_1X"
dg["X" => 2] = "edge_X2"
dg[2 => "Y"] = "edge_2Y"
@test dg[1 => "X"] == "edge_1X"
@test dg["X" => 1] == "edge_1X"
@test dg["X" => 2] == "edge_X2"
@test dg[2 => "X"] == "edge_X2"
@test dg[2 => "Y"] == "edge_2Y"
@test dg["Y" => 2] == "edge_2Y"
dg["X" => 1] = "edge_X1"
dg[2 => "X"] = "edge_2X"
dg["Y" => 2] = "edge_Y2"
@test dg[1 => "X"] == "edge_X1"
@test dg["X" => 1] == "edge_X1"
@test dg["X" => 2] == "edge_2X"
@test dg[2 => "X"] == "edge_2X"
@test dg[2 => "Y"] == "edge_Y2"
@test dg["Y" => 2] == "edge_Y2"
end
# `get` must not mutate; `get!` must insert the default on a miss.
@testset "get and get! functions" begin
g = grid((4,))
dg = DataGraph(g; vertex_data_eltype=String, edge_data_eltype=Symbol)
# Test for vertices
@test get(dg, 1, "default") == "default"
@test !isassigned(dg, 1)
@test get!(dg, 2, "default") == "default"
@test isassigned(dg, 2)
@test dg[2] == "default"
# Test for edges
@test get(dg, 1 => 2, :default) == :default
@test !isassigned(dg, 1 => 2)
@test get!(dg, 1 => 2, :default) == :default
@test isassigned(dg, 1 => 2)
@test dg[1 => 2] == :default
end
# The vertex-typed constructor should convert vertices and propagate the
# requested data element types into the (empty) data dictionaries.
@testset "Constructors specifying vertex type" begin
dg = DataGraph{Float64}(
named_path_graph(4); vertex_data_eltype=String, edge_data_eltype=Symbol
)
@test nv(dg) == 4
@test ne(dg) == 3
@test edgetype(dg) === NamedEdge{Float64}
@test vertextype(dg) === Float64
@test vertex_data_eltype(dg) === String
@test edge_data_eltype(dg) === Symbol
@test issetequal(vertices(dg), Float64.(1:4))
@test vertices(dg) isa AbstractIndices{Float64}
@test eltype(vertices(dg)) === Float64
@test has_edge(dg, 1.0 => 2.0)
@test has_edge(dg, 2.0 => 3.0)
@test has_edge(dg, 3.0 => 4.0)
@test vertex_data(dg) == Dictionary{Float64,String}()
@test vertex_data(dg) isa Dictionary{Float64,String}
@test keytype(vertex_data(dg)) === Float64
@test eltype(vertex_data(dg)) === String
@test edge_data(dg) == Dictionary{NamedEdge{Float64},Symbol}()
@test edge_data(dg) isa Dictionary{NamedEdge{Float64},Symbol}
@test keytype(edge_data(dg)) === NamedEdge{Float64}
@test eltype(edge_data(dg)) === Symbol
end
# Disjoint union relabels vertices as `(vertex, subgraph_index)` pairs;
# the `vcat`/`hcat` forms are not implemented yet (marked broken).
@testset "Disjoint unions" begin
g = DataGraph(named_grid((2, 2)); vertex_data_eltype=String, edge_data_eltype=String)
for v in vertices(g)
g[v] = "V$v"
end
for e in edges(g)
g[e] = "E$e"
end
gg = g β g
@test has_vertex(gg, ((1, 1), 1))
@test has_vertex(gg, ((1, 1), 2))
@test has_edge(gg, ((1, 1), 1) => ((1, 2), 1))
@test has_edge(gg, ((1, 1), 2) => ((1, 2), 2))
@test nv(gg) == 2nv(g)
@test ne(gg) == 2ne(g)
# TODO: Define `vcat`, `hcat`, `hvncat`?
gg = [g; g]
@test_broken has_vertex(gg, (1, 1))
@test_broken has_vertex(gg, (2, 1))
@test_broken has_vertex(gg, (3, 1))
@test_broken has_vertex(gg, (4, 1))
@test_broken has_edge(gg, (1, 1) => (1, 2))
@test_broken has_edge(gg, (3, 1) => (3, 2))
@test_broken nv(gg) == 2nv(g)
@test_broken ne(gg) == 2ne(g)
gg = [g;; g]
@test_broken has_vertex(gg, (1, 1))
@test_broken has_vertex(gg, (1, 2))
@test_broken has_vertex(gg, (1, 3))
@test_broken has_vertex(gg, (1, 4))
@test_broken has_edge(gg, (1, 1) => (1, 2))
@test_broken has_edge(gg, (1, 3) => (1, 4))
@test_broken nv(gg) == 2nv(g)
@test_broken ne(gg) == 2ne(g)
end
# `union` of data graphs merges overlapping data; the default keeps the
# second graph's data, and `merge_data` customizes the combination.
@testset "union" begin
g1 = DataGraph(grid((4,)))
g1[1] = ["A", "B", "C"]
g1[1 => 2] = ["E", "F"]
g2 = DataGraph(SimpleGraph(5))
add_edge!(g2, 1 => 5)
g2[1] = ["C", "D", "E"]
# Same as:
# union(g1, g2; merge_data=(x, y) -> y)
g = union(g1, g2)
@test nv(g) == 5
@test ne(g) == 4
@test has_edge(g, 1 => 2)
@test has_edge(g, 2 => 3)
@test has_edge(g, 3 => 4)
@test has_edge(g, 1 => 5)
@test g[1] == ["C", "D", "E"]
@test g[1 => 2] == ["E", "F"]
g = union(g1, g2; merge_data=union)
@test nv(g) == 5
@test ne(g) == 4
@test has_edge(g, 1 => 2)
@test has_edge(g, 2 => 3)
@test has_edge(g, 3 => 4)
@test has_edge(g, 1 => 5)
@test g[1] == ["A", "B", "C", "D", "E"]
@test g[1 => 2] == ["E", "F"]
end
# Graph algorithms should dispatch through to the underlying graph.
@testset "connected_components" begin
g1 = named_path_graph(3)
g2 = rename_vertices(v -> nv(g1) + v, g1)
dg1 = DataGraph(g1)
dg1[1] = "A1"
dg1[2] = "B1"
dg1[3] = "C1"
dg1[1 => 2] = "A1" => "B1"
dg1[2 => 3] = "B1" => "C1"
dg2 = DataGraph(g2)
dg2[1] = "A2"
dg2[2] = "B2"
dg2[3] = "C2"
dg2[1 => 2] = "A2" => "B2"
dg2[2 => 3] = "B2" => "C2"
dg = union(dg1, dg2)
comps = connected_components(dg)
@test length(comps) == 2
@test issetequal(comps[1], [1, 2, 3])
@test issetequal(comps[2], [4, 5, 6])
end
# Reversing a directed data graph should move the edge data to the
# reversed edges.
@testset "reverse" begin
g = DataGraph(SimpleDiGraph(4))
add_edge!(g, 1 => 2)
add_edge!(g, 3 => 4)
g[1 => 2] = :A
g[3 => 4] = "X"
rg = reverse(g)
@test has_edge(rg, 2 => 1)
@test has_edge(rg, 4 => 3)
@test rg[2 => 1] == :A
@test isassigned(rg, 2 => 1)
@test !isassigned(rg, 1 => 2)
@test rg[4 => 3] == "X"
@test !isassigned(rg, 3 => 4)
@test isassigned(rg, 4 => 3)
end
# BFS/DFS trees over data graphs return plain `NamedDiGraph` trees.
@testset "Tree traversal" begin
g = DataGraph(named_grid(4))
t = bfs_tree(g, 2)
es = [2 => 1, 2 => 3, 3 => 4]
@test t isa NamedDiGraph{Int}
@test nv(t) == nv(g)
@test ne(t) == nv(g) - 1
@test all(e -> has_edge(t, e), es)
t = dfs_tree(g, 2)
@test t isa NamedDiGraph{Int}
@test nv(t) == nv(g)
@test ne(t) == nv(g) - 1
@test all(e -> has_edge(t, e), es)
g = DataGraph(named_grid((4, 2)))
t = bfs_tree(g, (1, 1))
es = [
(1, 1) => (1, 2),
(1, 1) => (2, 1),
(2, 1) => (2, 2),
(2, 1) => (3, 1),
(3, 1) => (3, 2),
(3, 1) => (4, 1),
(4, 1) => (4, 2),
]
@test t isa NamedDiGraph{Tuple{Int,Int}}
@test nv(t) == nv(g)
@test ne(t) == nv(g) - 1
@test all(e -> has_edge(t, e), es)
t = dfs_tree(g, (1, 1))
es = [
(1, 1) => (2, 1),
(2, 1) => (3, 1),
(3, 1) => (4, 1),
(4, 1) => (4, 2),
(4, 2) => (3, 2),
(3, 2) => (2, 2),
(2, 2) => (1, 2),
]
@test t isa NamedDiGraph{Tuple{Int,Int}}
@test nv(t) == nv(g)
@test ne(t) == nv(g) - 1
@test all(e -> has_edge(t, e), es)
end
# Shortest-path results come back keyed by named vertices (dictionaries).
@testset "dijkstra_shortest_paths" begin
g = DataGraph(named_grid(4))
ps = dijkstra_shortest_paths(g, [1])
@test ps.dists == dictionary([1 => 0, 2 => 1, 3 => 2, 4 => 3])
@test ps.parents == dictionary([1 => 1, 2 => 1, 3 => 2, 4 => 3])
@test ps.pathcounts == dictionary([1 => 1.0, 2 => 1.0, 3 => 1.0, 4 => 1.0])
end
# Min-cut on a path graph separates the endpoints with unit flow,
# regardless of the vertex label type.
@testset "GraphsFlows.mincut (vertextype=$(eltype(verts))" for verts in (
[1, 2, 3, 4], ["A", "B", "C", "D"]
)
g = DataGraph(NamedGraph(path_graph(4), verts))
part1, part2, flow = GraphsFlows.mincut(g, verts[1], verts[4])
@test verts[1] β part1
@test verts[4] β part2
@test flow == 1
end
# Ordinal indices (`1st`, `2nd`, ...) address vertices by position and
# should be interchangeable with the vertex names themselves.
@testset "OrdinalIndexing" begin
g = DataGraph(
NamedGraph(path_graph(3), ["a", "b", "c"]);
vertex_data_eltype=String,
edge_data_eltype=Symbol,
)
g[1st] = "v_a"
g[2nd] = "v_b"
g[3rd] = "v_c"
g[1st => 2nd] = :e_ab
g[2nd => 3rd] = :e_bc
@test g["a"] == "v_a"
@test g["b"] == "v_b"
@test g["c"] == "v_c"
@test g["a" => "b"] === :e_ab
@test g["b" => "a"] === :e_ab
@test g["b" => "c"] === :e_bc
@test g["c" => "b"] === :e_bc
@test g[1st] == "v_a"
@test g[1th] == "v_a"
@test g[2nd] == "v_b"
@test g[2th] == "v_b"
@test g[3rd] == "v_c"
@test g[3th] == "v_c"
@test g[1st => 2nd] === :e_ab
@test g[2nd => 1st] === :e_ab
@test g[2nd => 3rd] === :e_bc
@test g[3rd => 2nd] === :e_bc
end
end
end
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | docs | 684 | # DataGraphs
[](https://mtfishman.github.io/DataGraphs.jl/stable)
[](https://mtfishman.github.io/DataGraphs.jl/dev)
[](https://github.com/mtfishman/DataGraphs.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/mtfishman/DataGraphs.jl)
[](https://github.com/invenia/BlueStyle)
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.2.4 | bbdd1cf21350c99c0848070cc598d6c37f7f26b5 | docs | 187 | ```@meta
CurrentModule = DataGraphs
```
# DataGraphs
Documentation for [DataGraphs](https://github.com/mtfishman/DataGraphs.jl).
```@index
```
```@autodocs
Modules = [DataGraphs]
```
| DataGraphs | https://github.com/ITensor/DataGraphs.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 5854 | """
make.jl
# Description
This file builds the documentation for the AdaptiveResonance.jl package
using Documenter.jl and other tools.
"""
# -----------------------------------------------------------------------------
# DEPENDENCIES
# -----------------------------------------------------------------------------
using
Documenter,
DemoCards,
Logging,
Pkg
# -----------------------------------------------------------------------------
# SETUP
# -----------------------------------------------------------------------------
# Common variables of the script
PROJECT_NAME = "AdaptiveResonance"
DOCS_NAME = "docs"
# Fix GR headless errors (forces the GR plotting backend into a no-display mode
# suitable for CI).
ENV["GKSwstype"] = "100"
# Get the current working directory's base name
current_dir = basename(pwd())
@info "Current directory is $(current_dir)"
# If using the CI method `julia --project=docs/ docs/make.jl`
# or `julia --startup-file=no --project=docs/ docs/make.jl`
if occursin(PROJECT_NAME, current_dir)
push!(LOAD_PATH, "../src/")
# Otherwise, we are already in the docs project and need to dev the above package
elseif occursin(DOCS_NAME, current_dir)
Pkg.develop(path="..")
# Otherwise, building docs from the wrong path
else
error("Unrecognized docs setup path")
end
# Include the local package
using AdaptiveResonance
# using JSON
# Allow extra Documenter arguments to be injected via the DOCSARGS env var.
if haskey(ENV, "DOCSARGS")
for arg in split(ENV["DOCSARGS"])
(arg in ARGS) || push!(ARGS, arg)
end
end
# -----------------------------------------------------------------------------
# DOWNLOAD LARGE ASSETS
# -----------------------------------------------------------------------------
# Point to the raw FileStorage location on GitHub
top_url = raw"https://media.githubusercontent.com/media/AP6YC/FileStorage/main/AdaptiveResonance/"
# List all of the files that we need to use in the docs
files = [
"header.png",
"art.png",
"artmap.png",
"ddvfa.png",
]
# Make a destination for the files, accounting for when folder is AdaptiveResonance.jl
assets_folder = joinpath("src", "assets")
if basename(pwd()) == PROJECT_NAME || basename(pwd()) == PROJECT_NAME * ".jl"
assets_folder = joinpath(DOCS_NAME, assets_folder)
end
download_folder = joinpath(assets_folder, "downloads")
mkpath(download_folder)
# NOTE(review): `download_list` is populated below but never consumed (the
# block that would append it to `assets` is commented out further down).
download_list = []
# Download the files one at a time
for file in files
# Point to the correct file that we wish to download
src_file = top_url * file * "?raw=true"
# Point to the correct local destination file to download to
dest_file = joinpath(download_folder, file)
# Add the file to the list that we will append to assets
push!(download_list, dest_file)
# If the file isn't already here, download it
if !isfile(dest_file)
download(src_file, dest_file)
@info "Downloaded $dest_file, isfile: $(isfile(dest_file))"
else
@info "File already exists: $dest_file"
end
end
# Downloads debugging
# NOTE(review): unqualified `Info` relies on the `Logging` stdlib exporting the
# `Info` log level — confirm for the minimum supported Julia version.
detailed_logger = Logging.ConsoleLogger(stdout, Info, show_limited=false)
with_logger(detailed_logger) do
@info "Current working directory is $(pwd())"
@info "Assets folder is:" readdir(assets_folder, join=true)
# full_download_folder = joinpath(pwd(), "src", "assets", "downloads")
@info "Downloads folder exists: $(isdir(download_folder))"
if isdir(download_folder)
@info "Downloads folder contains:" readdir(download_folder, join=true)
end
end
# -----------------------------------------------------------------------------
# GENERATE
# -----------------------------------------------------------------------------
# Generate the demo files
# this is the relative path to docs/
demopage, postprocess_cb, demo_assets = makedemos("examples")
assets = [
joinpath("assets", "favicon.ico"),
]
# @info "Favicon?"
# @info isfile(joinpath("assets", "favicon.ico"))
# # Add the downloaded files to the assets list
# for file in files
# local_file = joinpath("assets", "downloads", file)
# @info isfile(local_file)
# push!(assets, local_file)
# end
# if there are generated css assets, pass it to Documenter.HTML
isnothing(demo_assets) || (push!(assets, demo_assets))
# Make the documentation
makedocs(
modules=[AdaptiveResonance],
format=Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true",
assets = assets,
),
pages=[
"Home" => "index.md",
"Getting Started" => [
"getting-started/whatisart.md",
"getting-started/basic-example.md",
],
"Tutorial" => [
"Guide" => "man/guide.md",
demopage,
# "Examples" => "man/examples.md",
"Modules" => "man/modules.md",
"Contributing" => "man/contributing.md",
"Index" => "man/full-index.md",
"Internals" => "man/dev-index.md",
],
],
repo="https://github.com/AP6YC/AdaptiveResonance.jl/blob/{commit}{path}#L{line}",
sitename="AdaptiveResonance.jl",
authors="Sasha Petrenko",
# assets=String[],
)
# 3. postprocess after makedocs
postprocess_cb()
# a workaround to github action that only push preview when PR has "push_preview" labels
# issue: https://github.com/JuliaDocs/Documenter.jl/issues/1225
# function should_push_preview(event_path = get(ENV, "GITHUB_EVENT_PATH", nothing))
# event_path === nothing && return false
# event = JSON.parsefile(event_path)
# haskey(event, "pull_request") || return false
# labels = [x["name"] for x in event["pull_request"]["labels"]]
# return "push_preview" in labels
# end
# -----------------------------------------------------------------------------
# DEPLOY
# -----------------------------------------------------------------------------
deploydocs(
repo="github.com/AP6YC/AdaptiveResonance.jl.git",
devbranch="develop",
# push_preview = should_push_preview(),
)
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 225 | """
serve.jl
# Description
Convenience script that serves the locally built documentation.
"""
using LiveServer
# Build the documentation first so the served site reflects the latest sources.
include("make.jl")
# Serve the built documentation locally (with live reload) for development.
serve(dir="build")
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 4698 | # ---
# title: ART DataConfig Example
# id: data_config
# cover: ../../assets/downloads/art.png
# date: 2021-12-2
# author: "[Sasha Petrenko](https://github.com/AP6YC)"
# julia: 1.8
# description: This demo illustrates how the data configuration object works for data preprocessing in ART modules that require it.
# ---
# ## Overview
# In their derivations, ART modules have some special requirements when it comes to their input features.
# FuzzyART in particular, and subsequently its derivatives, has a requirement that the inputs be bounded and complement coded.
# This is due to some consequences such as weight decay that occur when using real-valued patterns rather than binary ones (and hence operations like fuzzy membership).
# Preprocessing of the features occurs as follows:
# 1. The features are linearly normalized from 0 to 1 with respect to each feature with `linear_normalization`.
# This is done according to some known bounds that each feature has.
# 2. The features are then complement coded, meaning that the feature vector is appended to its 1-complement (i.e., $x \rightarrow \left[x, 1-x\right]$) with `complement_code`.
# This preprocessing has the ultimate consequence that the input features must be bounded.
# This may not be a problem in some offline applications with a fixed dataset, but in others where the bounds are not known, techniques such as sigmoidal limiting are often used to place an artificial limit.
# ## DataConfig
# Regardless, this process requires some *a-priori* knowledge about the minimums and maximums that each feature can have, which is stored as a preprocessing configuration.
# This preprocessing configuration is saved in every ART module as a `DataConfig` object called `config`, which we can see is uninitialized at first:
## Load the library
using AdaptiveResonance
## Create a new ART module and inspect its uninitialized data config `config`
art = FuzzyART()
art.config
# We see that the type of `art.config` is `DataConfig`.
# We can see what the internal elements of this struct are with `fieldnames`:
fieldnames(AdaptiveResonance.DataConfig)
# We see that the dataconfig has a boolean setup flag, minimum and maximum feature vectors, dimensionality of the data, and the complement coded dimensionality (twice the size of the original dimension).
# ### Automatic Configuration
# In batch training mode, the minimums and maximums are detected automatically; the minimum and maximum values for every feature are saved and used for the preprocessing step at every subsequent iteration.
## Load data
using MLDatasets # Iris dataset
using DataFrames # DataFrames, necessary for MLDatasets.Iris()
using MLDataUtils # Shuffling and splitting
# We will download the Iris dataset for its small size and benchmark use for clustering algorithms.
## Get the iris dataset
iris = Iris(as_df=false)
## Manipulate the features and labels into a matrix of features and a vector of labels
features, labels = iris.features, iris.targets
# Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class:
labels = convertlabel(LabelEnc.Indices{Int}, vec(labels))
unique(labels)
# !!! note
# This automatic detection of feature characteristics only occurs if the `config` is not already setup.
# If it is setup beforehand, then that config is used instead.
# ### Manual Configuration
# As mentioned before, we may not always have the luxury of having a representative dataset in advance.
# Alternatively, we may know the bounds of the features but wish to run incrementally rather than in batch.
# In these cases, we can setup the config the various `DataConfig` constructors.
# For example, if the features are all bounded from -1 to 1, we have to also specify the original dimension of the data in `DataConfig(min, max, dim)`:
## Reinitialize the FuzzyART module
art = FuzzyART()
## Tell the module that we have 20 features all ranging from -1 to 1
art.config = DataConfig(-1, 1, 20)
# If the features differ in their ranges, we can specify with `DataConfig(mins, maxs)`:
## Assume some minimum and maximum values for each feature
mins = [-1,-2,-1.5]
maxs = [3, 2, 1]
art.config = DataConfig(mins, maxs)
# Here, we don't need to specify the feature dimensionality because it is inferred from the length of the range values.
# !!! note
# After the first training run, the weights of the network are set to the size of the complement coded dimension.
# If you wish to change the dimension of the features, you will need to create a new network.
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 8070 | # ---
# title: Incremental vs. Batch Example
# id: incremental_batch
# cover: assets/incremental-batch-cover.png
# date: 2021-12-1
# author: "[Sasha Petrenko](https://github.com/AP6YC)"
# julia: 1.8
# description: This demo illustrates how to use incremental training methods vs. batch training for all ART modules.
# ---
# ## Overview
# All modules in `AdaptiveResonance.jl` are designed to handle incremental and batch training.
# In fact, ART modules are generally incremental in their implementation, so their batch methods wrap the incremental ones and handle preprocessing, etc.
# For example, DDVFA can be run incrementally (i.e. with one sample at a time) with custom algorithmic options and a predetermined data configuration.
# !!! note
# In the incremental case, it is necessary to provide a data configuration if the model is not pretrained because the model has no knowledge of the boundaries and dimensionality of the data, which are necessary in the complement coding step.
# For more info, see the guide in the docs on [incremental vs. batch](@ref incremental_vs_batch).
# ## Data Setup
# We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities.
using AdaptiveResonance # ART
using MLDatasets # Iris dataset
using DataFrames # DataFrames, necessary for MLDatasets.Iris()
using MLDataUtils # Shuffling and splitting
using Printf # Formatted number printing
# We will download the Iris dataset for its small size and benchmark use for clustering algorithms.
## Get the iris dataset
iris = Iris(as_df=false)
## Manipulate the features and labels into a matrix of features and a vector of labels
features, labels = iris.features, iris.targets
# Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class:
labels = convertlabel(LabelEnc.Indices{Int}, vec(labels))
unique(labels)
# Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility:
(X_train, y_train), (X_test, y_test) = stratifiedobs((features, labels))
# ## Incremental vs. Batch
# ### Setup
# Now, we can create several modules to illustrate training one in batch and one incrementaly.
## Create several modules for batch and incremental training.
## We can take advantage of the options instantiation method here to use the same options for both modules.
opts = opts_DDVFA(rho_lb=0.6, rho_ub=0.75)
art_batch = DDVFA(opts)
art_incremental = DDVFA(opts)
# For the incremental version, we must setup the data configuration in advance.
# In batch mode, this is done automatically based upon the provided data, but the incremental variant has not way of knowing the bounds of the individual features.
# We *could* preprocess the data and set the data configuration with `art.config = DataConfig(0, 1, 4)`, which translates to the data containing four features that *all* range from 0 to 1.
# This would be done in scenarios where we have either done some preprocessing on the data or have prior knowledge about the bounds of individual features.
# However, in this example we will let the module determine the bounds with the convenience method `data_setup!`:
## Setup the data config on all of the features.
data_setup!(art_incremental.config, features)
# ### Training
# We can train in batch with a simple supervised mode by passing the labels as a keyword argument.
y_hat_batch_train = train!(art_batch, X_train, y=y_train)
println("Training labels: ", size(y_hat_batch_train), " ", typeof(y_hat_batch_train))
# We can also train incrementally with the same method, being careful that we pass a vector of features and a single integer as the label
## Get the number of training samples
n_train = length(y_train)
## Create a container for the training output labels
y_hat_incremental_train = zeros(Int, n_train)
## Iterate over all training samples
for ix in eachindex(y_train)
## Grab one sample (a feature column) and its supervisory label
sample = X_train[:, ix]
label = y_train[ix]
## Incremental training returns a single integer label for the sample
y_hat_incremental_train[ix] = train!(art_incremental, sample, y=label)
end
# ### Testing
# We can then classify both networks and check that their performances are equivalent.
# For both, we will use the best-matching unit in the case of complete mismatch (see the docs on [Mismatch vs. BMU](@ref mismatch-bmu))
## Classify one model in batch mode
y_hat_batch = AdaptiveResonance.classify(art_batch, X_test, get_bmu=true)
## Classify one model incrementally
n_test = length(y_test)
y_hat_incremental = zeros(Int, n_test)
for ix = 1:n_test
y_hat_incremental[ix] = AdaptiveResonance.classify(art_incremental, X_test[:, ix], get_bmu=true)
end
## Check the shape and type of the output labels
println("Batch testing labels: ", size(y_hat_batch), " ", typeof(y_hat_batch))
println("Incremental testing labels: ", size(y_hat_incremental), " ", typeof(y_hat_incremental))
# Finally, we check the performance (number of correct classifications over total number of test samples) for both models, verifying that they produce the same results.
## Calculate performance on training data, testing data, and with get_bmu
perf_train_batch = performance(y_hat_batch_train, y_train)
perf_train_incremental = performance(y_hat_incremental_train, y_train)
perf_test_batch = performance(y_hat_batch, y_test)
perf_test_incremental = performance(y_hat_incremental, y_test)
## Format each performance number for comparison
## NOTE(review): @printf needs the Printf stdlib, which only appears later in this script (under visualization) — presumably it is also imported earlier; verify.
@printf "Batch training performance: %.4f\n" perf_train_batch
@printf "Incremental training performance: %.4f\n" perf_train_incremental
@printf "Batch testing performance: %.4f\n" perf_test_batch
@printf "Incremental testing performance: %.4f\n" perf_test_incremental
# ## Visualization
# So we showed that the performance and behavior of modules are identical in incremental and batch modes.
# Great!
# Sadly, illustrating this point doesn't lend itself to visualization in any meaningful way.
# Nonetheless, we would like a pretty picture at the end of the experiment to verify that these identical solutions work in the first place.
# Sanity checks are meaningful in their own right, right?
# To do this, we will reduce the dimensionality of the dataset to two dimensions and show in a scatter plot how the modules classify the test data into groups.
# This will be done with principal component analysis (PCA) to cast the points into a 2-D space while trying to preserve the relative distances between points in the higher dimension.
# The process isn't perfect by any means, but it suffices for visualization.
## Import visualization utilities
using Printf # Formatted number printing
using MultivariateStats # Principal component analysis (PCA)
using Plots # Plotting frontend
gr() # Use the default GR backend explicitly
## Train a PCA model
M = fit(PCA, features; maxoutdim=2)
## Apply the PCA model to the testing set
X_test_pca = MultivariateStats.transform(M, X_test)
# Now that we have the test points cast into a 2-D set of points, we can create a scatter plot that shows how each point is categorized by the modules.
## Create a scatterplot object from the data with some additional formatting options
scatter(
    X_test_pca[1, :],           # PCA dimension 1
    X_test_pca[2, :],           # PCA dimension 2
    group = y_hat_batch,        # labels belonging to each point
    markersize = 8,             # size of scatter points
    legend = false,             # no legend
    xtickfontsize = 12,         # x-tick size
    ytickfontsize = 12,         # y-tick size
    dpi = 300,                  # Set the dots-per-inch
    xlims = :round,             # Round up the x-limits to the nearest whole number
    xlabel = "\$PCA_1\$",       # x-label
    ylabel = "\$PCA_2\$",       # y-label
    title = "DDVFA Iris Clusters", # static title; @sprintf was unnecessary with no format arguments
)
# This plot shows that the DDVFA modules do well at identifying the structure of the three clusters despite not achieving 100% test performance.
png("assets/incremental-batch-cover") #hide
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 8560 | # ---
# title: ART Options Example
# id: options
# cover: assets/options-cover.png
# date: 2021-12-2
# author: "[Sasha Petrenko](https://github.com/AP6YC)"
# julia: 1.8
# description: This demo illustrates how to use options and modify the options for all ART and ARTMAP modules.
# ---
# ## Overview
# The `AdaptiveResonance.jl` package has several ways of handling options for ART modules.
# These methods are meant to give maximum flexibility to the user for sharing and interpreting options, which themselves vary between each module.
# !!! note
# For more info on options in ART modules, see the guide in the docs on [ART options](@ref art_options).
# ## ART Options
# To get a feel for the ART options system, we will inspect different options and their instantiation methods.
# ### Inspection
# First, we load `AdaptiveResonance`:
using AdaptiveResonance
# Every ART module has a default constructor, which can be instantiated in the usual way:
## Create a FuzzyART module with default options
my_fuzzyart = FuzzyART()
typeof(my_fuzzyart)
# Within every ART module is a [Parameters.jl](https://github.com/mauro3/Parameters.jl) struct named `opts` containing the options for the module
## Check the FuzzyART options
my_fuzzyart.opts
# Note that the options here have the type `opts_FuzzyART`.
# This nomenclature is used throughout the package to indicate an options type associated with an ART module.
# For example, the options for a DDVFA module are `opts_DDVFA`:
## Create a DDVFA module and check the type of the options
my_ddvfa = DDVFA()
typeof(my_ddvfa.opts)
# In fact, we can create an instance of these options with a default constructor:
## Create a separate options struct
my_fuzzyart_opts = opts_FuzzyART()
# In addition to the default constructor, we can construct ART modules by instantiating these options and passing them to the module during construction:
## Instantiate an ART module by passing our options; one options struct can be shared by multiple modules
my_fuzzyart = FuzzyART(my_fuzzyart_opts)
my_other_fuzzyart = FuzzyART(my_fuzzyart_opts)
# ### Specifying Options
# Now to the good stuff: because of the behavior of the `Parameters.jl` type, each option has a default value that we can modify during instantiation with keyword arguments:
## Change some of the default FuzzyART options
my_fuzzyart_opts = opts_FuzzyART(
rho=0.6,
gamma_normalization=true
)
my_fuzzyart = FuzzyART(my_fuzzyart_opts)
# As some syntactic sugar, we can pass these keyword arguments directly to the module during instantiation if we have no need to share option structs:
## Pass these keyword arguments to the module directly
my_fuzzyart = FuzzyART(
rho=0.6,
gamma_normalization=true
)
# Before training, we can also instantiate the model and alter the options afterward:
## Mutate the vigilance parameter in-place on the module's options struct
my_fuzzyart = FuzzyART()
my_fuzzyart.opts.rho=0.6
# !!! note
#     All ART modules are designed to use this options struct internally when the parameters are needed.
#     It is possible to change these parameters in the middle of training and evaluation, but some algorithmic instability may occur.
# ## Comparison
# To see the effect that changing these parameters has on the modules, we can train and test them side-by-side.
# We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities.
using MLDatasets # Iris dataset
using DataFrames # DataFrames, necessary for MLDatasets.Iris()
using MLDataUtils # Shuffling and splitting
using Printf # Formatted number printing
using MultivariateStats # Principal component analysis (PCA)
using Plots # Plotting frontend
gr() # Use the default GR backend explicitly
# We will download the Iris dataset for its small size and benchmark use for clustering algorithms.
## Get the iris dataset
iris = Iris(as_df=false)
## Manipulate the features and labels into a matrix of features and a vector of labels
features, labels = iris.features, iris.targets
# Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class:
labels = convertlabel(LabelEnc.Indices{Int}, vec(labels))
unique(labels)
# Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility:
(X_train, y_train), (X_test, y_test) = stratifiedobs((features, labels))
# Now we can create several FuzzyART modules with different options.
## Create two FuzzyARTs with different vigilance values while suppressing logging messages
rho_1 = 0.5
rho_2 = 0.7
my_fuzzyart_1 = FuzzyART(rho=rho_1, display=false)
my_fuzzyart_2 = FuzzyART(rho=rho_2, display=false)
# Here, we will train these FuzzyART modules in simple supervised mode by passing the supervised labels as a keyword argument:
## Train in simple supervised mode by passing the labels as a keyword argument.
y_hat_train_1 = train!(my_fuzzyart_1, X_train, y=y_train)
y_hat_train_2 = train!(my_fuzzyart_2, X_train, y=y_train)
# We then classify the test data with both modules:
y_hat_1 = AdaptiveResonance.classify(my_fuzzyart_1, X_test, get_bmu=true)
y_hat_2 = AdaptiveResonance.classify(my_fuzzyart_2, X_test, get_bmu=true)
## Check the shape and type of the output labels
println("FuzzyART 1 labels: ", size(y_hat_1), " ", typeof(y_hat_1))
println("FuzzyART 2 labels: ", size(y_hat_2), " ", typeof(y_hat_2))
## Calculate the performance on the test data
perf_test_1 = performance(y_hat_1, y_test)
perf_test_2 = performance(y_hat_2, y_test)
## Format each performance number for comparison
@printf "Testing performance rho=%.1f: %.4f\n" rho_1 perf_test_1
@printf "Testing performance rho=%.1f: %.4f\n" rho_2 perf_test_2
# In addition to having different performances, we can see that there is a subsequent trade-off in the number of categories used:
## Print the number of categories for each vigilance parameter
@printf "Number of categories rho=%.1f: %i\n" rho_1 my_fuzzyart_1.n_categories
@printf "Number of categories rho=%.1f: %i\n" rho_2 my_fuzzyart_2.n_categories
# The variation between vigilance parameter, number of categories created during learning, and testing performance/generalization is a central theme in ART-based algorithms.
# ## Visualization
# Now, to visualize how the two models differ in how they partition the data, we can use principal component analysis (PCA) to compress to two plotting dimensions.
# PCA is a method to represent a dataset in a different number of dimensions while preserving the relative separation between datapoints.
# Though most datasets are not able to be effectively transformed down to two dimensions, this technique is useful to get a general sense of how well separated the classes are and how well your algorithm classifies them.
## Train a PCA model to visually separate the features in two dimensions.
M = fit(PCA, features; maxoutdim=2)
## Apply the PCA model to the testing set
X_test_pca = MultivariateStats.transform(M, X_test)
# We can now plot the PCA'ed test set and label the points according to the two FuzzyART's predictions.
# We will do so by creating a function for the subplots first as they will share the same format, and we dare not duplicate code.
# Then, we will plot those subplots side-by-side.
## Create a function for our subplots
function fuzzyart_scatter(data, labels, rho)
    ## Build the formatted subplot title once for readability
    subplot_title = @sprintf "FuzzyART \$\\rho\$ = %.1f" rho
    ## Assemble and return the scatterplot for one FuzzyART module's predictions
    return scatter(
        data[1, :],            # PCA dimension 1
        data[2, :];            # PCA dimension 2
        group = labels,        # labels belonging to each point
        markersize = 8,        # size of scatter points
        xlims = [-4, 4],       # manually set the x-limits
        title = subplot_title, # formatted title
    )
end
## Create the two scatterplot objects
p1 = fuzzyart_scatter(X_test_pca, y_hat_1, rho_1)
p2 = fuzzyart_scatter(X_test_pca, y_hat_2, rho_2)
## Plot the two scatterplots together
plot(
p1, p2, # scatterplot objects
layout = (1, 2), # plot side-by-side
legend = false, # no legend
xtickfontsize = 12, # x-tick size
ytickfontsize = 12, # y-tick size
xlabel = "\$PCA_1\$", # x-label
ylabel = "\$PCA_2\$", # y-label
dpi = 300, # Set the dots-per-inch
)
# We can see that the two different vigilance values result in similar results on the whole, though they differ in how they classify certain samples that straddle the border between clusters.
png("assets/options-cover") #hide
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 3782 | # ---
# title: Supervised DDVFA Example
# id: ddvfa_supervised
# cover: ../../assets/downloads/ddvfa.png
# date: 2021-11-30
# author: "[Sasha Petrenko](https://github.com/AP6YC)"
# julia: 1.8
# description: This demo shows how to use DDVFA for simple supervised learning by clustering Iris samples and mapping the modules internal categories to the true labels.
# ---
# DDVFA is an unsupervised clustering algorithm by definition, but it can be adapted for supervised learning by mapping the module's internal categories to the true labels.
# ART modules such as DDVFA can also be used in simple supervised mode where provided labels are used in place of internal incremental labels for the clusters, providing a method of assessing the clustering performance when labels are available.
# We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities.
using AdaptiveResonance # ART
using MLDatasets # Iris dataset
using DataFrames # DataFrames, necessary for MLDatasets.Iris()
using MLDataUtils # Shuffling and splitting
using Printf # Formatted number printing
# We will download the Iris dataset for its small size and benchmark use for clustering algorithms.
## Get the iris dataset
iris = Iris(as_df=false)
## Manipulate the features and labels into a matrix of features and a vector of labels
features, labels = iris.features, iris.targets
# Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class:
labels = convertlabel(LabelEnc.Indices{Int}, vec(labels))
unique(labels)
# Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility:
(X_train, y_train), (X_test, y_test) = stratifiedobs((features, labels))
# Now, we can create our DDVFA module.
# We'll do so with the default constructor, though the module itself has many options that you can alter during instantiation.
art = DDVFA()
# We can train the model in batch mode upon the data in a simple supervised mode.
# We do so by passing the integer vector of labels to the training method with the simple keyword `y`.
# Just as in unsupervised training, we can extract the module's prescribed labels from the training method, which should match up to the training labels as we will see later.
## Train in simple supervised mode by passing the labels as a keyword argument.
y_hat_train = train!(art, X_train, y=y_train)
println("Training labels: ", size(y_hat_train), " ", typeof(y_hat_train))
# We can classify the testing data to see how we generalize.
# At the same time, we can see the effect of getting the best-matching unit in the case of complete mismatch (see the docs on [Mismatch vs. BMU](@ref mismatch-bmu))
## Classify both ways
y_hat = AdaptiveResonance.classify(art, X_test)
y_hat_bmu = AdaptiveResonance.classify(art, X_test, get_bmu=true)
## Check the shape and type of the output labels
println("Testing labels: ", size(y_hat), " ", typeof(y_hat))
println("Testing labels with bmu: ", size(y_hat_bmu), " ", typeof(y_hat_bmu))
# Finally, we can calculate the performances (number correct over total) of the model upon all three regimes:
# 1. Training data
# 2. Testing data
# 3. Testing data with `get_bmu=true`
## Calculate performance on training data, testing data, and with get_bmu
perf_train = performance(y_hat_train, y_train)
perf_test = performance(y_hat, y_test)
perf_test_bmu = performance(y_hat_bmu, y_test)
## Format each performance number for comparison
@printf "Training performance: %.4f\n" perf_train
@printf "Testing performance: %.4f\n" perf_test
@printf "Best-matching unit testing performance: %.4f\n" perf_test_bmu
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1963 | # ---
# title: Unsupervised DDVFA Example
# id: ddvfa_unsupervised
# cover: ../../assets/downloads/ddvfa.png
# date: 2021-11-30
# author: "[Sasha Petrenko](https://github.com/AP6YC)"
# julia: 1.8
# description: This demo shows how to use DDVFA for unsupervised learning by clustering Iris samples.
# ---
# DDVFA is an unsupervised clustering algorithm by definition, so it can be used to cluster a set of samples all at once in batch mode.
# We begin with importing AdaptiveResonance for the ART modules and MLDatasets for loading some data.
using AdaptiveResonance # ART
using MLDatasets # Iris dataset
using DataFrames # DataFrames, necessary for MLDatasets.Iris()
using MLDataUtils # Shuffling and splitting
# We will download the Iris dataset for its small size and benchmark use for clustering algorithms.
## Get the iris dataset
iris = Iris(as_df=false)
## Extract the features into a local variable
features = iris.features
# Next, we will instantiate a DDVFA module.
# We could create an options struct for reuse with `opts=opts_DDVFA(...)`, but for now we will use the direct keyword arguments approach.
art = DDVFA(rho_lb=0.6, rho_ub=0.75)
# To train the module on the training data, we use `train!`.
# The train method returns the prescribed cluster labels, which are just what the algorithm believes are unique/separate clusters.
# This is because we are doing *unsupervised* learning rather than supervised learning with known labels.
y_hat_train = train!(art, features)
# Though we could inspect the unique entries in the list above, we can see the number of categories directly from the art module.
art.n_categories
# Because DDVFA actually has FuzzyART modules for F2 nodes, each category has its own category prototypes.
# We can see the total number of weights in the DDVFA module by summing `n_categories` across all F2 nodes.
total_vec = [art.F2[i].n_categories for i = 1:art.n_categories]
total_cat = sum(total_vec)
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 3602 | # ---
# title: Supervised Simplified FuzzyARTMAP (SFAM) Example
# id: sfam_iris
# cover: ../../assets/downloads/artmap.png
# date: 2021-11-30
# author: "[Sasha Petrenko](https://github.com/AP6YC)"
# julia: 1.8
# description: This demo shows how to use a Simplified FuzzyARTMAP (SFAM) module to conduct supervised learning on the Iris dataset.
# ---
# SFAM is a supervised algorithm by definition, so we use it to map a set of features to a set of supervisory labels.
# We will do so by training and testing on the ubiquitous Iris dataset and seeing how well the SFAM module generalizes the data.
# We begin with importing AdaptiveResonance for the ART modules and MLDatasets for some data utilities.
using AdaptiveResonance # ART
using MLDatasets # Iris dataset
using MLDataUtils # Shuffling and splitting
using Printf # Formatted number printing
# We will download the Iris dataset for its small size and benchmark use for clustering algorithms.
## Get the iris dataset as a DataFrame
iris = Iris()
## Manipulate the features and labels into a matrix of features and a vector of labels
features, labels = Matrix(iris.features)', vec(Matrix{String}(iris.targets))
# Because the MLDatasets package gives us Iris labels as strings, we will use the `MLDataUtils.convertlabel` method with the `MLLabelUtils.LabelEnc.Indices` type to get a list of integers representing each class:
labels = convertlabel(LabelEnc.Indices{Int}, labels)
unique(labels)
# Next, we will create a train/test split with the `MLDataUtils.stratifiedobs` utility:
(X_train, y_train), (X_test, y_test) = stratifiedobs((features, labels))
# Now, we can create our SFAM module.
# We'll do so with the default constructor, though the module itself has many options that can be altered during instantiation.
## Create the SFAM module
art = SFAM()
## Change the match tracking parameter after instantiation
art.opts.epsilon = 1e-2
# We can train the model in batch mode upon the data and supervisory labels.
# We do so by directly passing the integer vector of labels to the training method.
# Just as in other modules, we can extract the SFAM's prescribed labels from the training method, which should match up to the training labels as we will see later.
## Train in supervised mode by directly passing the labels.
y_hat_train = train!(art, X_train, y_train)
println("Training labels: ", size(y_hat_train), " ", typeof(y_hat_train))
# We can classify the testing data to see how we generalize.
# At the same time, we can see the effect of getting the best-matching unit in the case of complete mismatch (see the docs on [Mismatch vs. BMU](@ref mismatch-bmu))
## Classify both ways
y_hat = AdaptiveResonance.classify(art, X_test)
y_hat_bmu = AdaptiveResonance.classify(art, X_test, get_bmu=true)
## Check the shape and type of the output labels
println("Testing labels: ", size(y_hat), " ", typeof(y_hat))
println("Testing labels with bmu: ", size(y_hat_bmu), " ", typeof(y_hat_bmu))
# Finally, we can calculate the performances (number correct over total) of the model upon all three regimes:
# 1. Training data
# 2. Testing data
# 3. Testing data with `get_bmu=true`
## Calculate performance on training data, testing data, and with get_bmu
perf_train = performance(y_hat_train, y_train)
perf_test = performance(y_hat, y_test)
perf_test_bmu = performance(y_hat_bmu, y_test)
## Format each performance number for comparison
@printf "Training performance: %.4f\n" perf_train
@printf "Testing performance: %.4f\n" perf_test
@printf "Best-matching unit testing performance: %.4f\n" perf_test_bmu
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 6102 | """
Main module for `AdaptiveResonance.jl`, a Julia package of adaptive resonance theory algorithms.
This module exports all of the ART modules, options, and utilities used by the `AdaptiveResonance.jl` package.
For full usage, see the official guide at https://ap6yc.github.io/AdaptiveResonance.jl/dev/man/guide/.
# Basic Usage
Install and import the package in a script with
```julia
using Pkg
Pkg.add("AdaptiveResonance")
using AdaptiveResonance
```
then create an ART module with default options
```julia
my_art = DDVFA()
```
or custom options via keyword arguments
```julia
my_art = DDVFA(rho_lb=0.45, rho_ub=0.7)
```
Train all models with `train!` and conduct inference with `classify`.
In batch, samples are interpreted in the Julia column-major fashion with dimensions `(n_dim, n_samples)` (i.e., columns are samples).
Train unsupervised ART modules incrementally or in batch with optional labels as a keyword argument `y`
```julia
# Load your data somehow
samples, labels = load_some_data()
# Unsupervised batch
train!(my_art, samples)
# Supervised batch
train!(my_art, samples, y=labels)
# Unsupervised incremental
for ix in eachindex(labels)
train!(my_art, samples[:, ix])
end
# Supervised incremental
for ix in eachindex(labels)
train!(my_art, samples[:, ix], y=labels[ix])
end
```
Train supervised ARTMAP with positional arguments
```julia
my_artmap = SFAM()
train!(my_artmap, samples, labels)
```
With either module, conduct inference with `classify(art, samples)`
```julia
# Batch inference
y_hat = classify(my_art, test_samples)
# Incremental inference
for ix in eachindex(test_labels)
y_hat[ix] = classify(my_artmap, test_samples[:, ix])
end
```
# Imports
The following names are imported by the package as dependencies:
$(IMPORTS)
# Exports
The following names are exported and available when `using` the package:
$(EXPORTS)
"""
module AdaptiveResonance
# -----------------------------------------------------------------------------
# DEPENDENCIES
# -----------------------------------------------------------------------------
# Usings/imports for the whole package declared once
# Full usings (which supports comma-separated import notation)
using
DocStringExtensions, # Docstring utilities
ElasticArrays, # Fast resizable arrays
Logging, # Logging utils used as main method of terminal reporting
NumericalTypeAliases, # Abstract type aliases
Parameters, # ARTopts are parameters (@with_kw)
ProgressBars # Provides progress bars for training and inference
# Specific identifiers
using LinearAlgebra: norm # Trace and norms
# Medians and mean for linkage methods
using Statistics: median as statistics_median
using Statistics: mean as statistics_mean
# Precompile concrete type methods
using PrecompileSignatures: @precompile_signatures
# -----------------------------------------------------------------------------
# INCLUDES
# -----------------------------------------------------------------------------
# Include all files
# include("common.jl") # Objects shared by all modules
# Common objects and functions shared by all modules
include("lib/lib.jl")
# Exported constant for the version of the package
include("version.jl")
# Supervised ART modules
include("ARTMAP/ARTMAP.jl")
# Unsupervised ART modules
include("ART/ART.jl")
# -----------------------------------------------------------------------------
# DERIVATIVE TYPES AND CONSTANTS
# -----------------------------------------------------------------------------
"""
A combined list of all unsupervised ART and supervised ARTMAP modules from the `AdaptiveResonance.jl` package.
"""
const ADAPTIVERESONANCE_MODULES = [
ART_MODULES;
ARTMAP_MODULES;
]
# -----------------------------------------------------------------------------
# EXPORTS
# -----------------------------------------------------------------------------
# Export all public names
export
# Abstract types
ARTOpts, # All module options are ARTOpts
ARTModule, # All modules are ART modules
ART, # ART modules (unsupervised)
ARTMAP, # ARTMAP modules (supervised)
# Algorithmic functions
train!, # Train models: train!(art, data, y=[])
classify, # Inference: classify(art, data)
performance, # Classification accuracy: performance(y, y_hat)
# Common structures
DataConfig, # ART data configs (feature ranges, dimension, etc.)
data_setup!, # Correctly set up an ART data configuration
# Common utility functions
complement_code, # Map x -> [x, 1 - x] and normalize to [0, 1]
get_data_characteristics, # Get characteristics of x, used by data configs
linear_normalization, # Normalize x to [0, 1]
# ART (unsupervised)
FuzzyART, opts_FuzzyART,
DDVFA, opts_DDVFA, get_W,
DVFA, opts_DVFA,
# ART variants
GammaNormalizedFuzzyART, opts_GammaNormalizedFuzzyART,
# ARTMAP (supervised)
FAM, opts_FAM,
SFAM, opts_SFAM,
# ARTMAP variants
DAM, opts_DAM,
# Useful constants
ART_MODULES, # List of (default) unsupervised ART modules
ARTMAP_MODULES, # List of supervised ARTMAP modules
ADAPTIVERESONANCE_MODULES, # Combined list of ART and ARTMAP modules
DDVFA_METHODS, # DDVFA linkage methods
ADAPTIVERESONANCE_VERSION, # Version of the package
MATCH_FUNCTIONS, # All match functions as a list of symbols
ACTIVATION_FUNCTIONS, # All activation functions as a list of symbols
UPDATE_FUNCTIONS, # All update functions as a list of symbols
# ARTSCENE filter functions
artscene_filter # Runs all of the ARTSCENE functions in one step, returning features
# -----------------------------------------------------------------------------
# PRECOMPILE
# -----------------------------------------------------------------------------
# Precompile any concrete-type function signatures
@precompile_signatures(AdaptiveResonance)
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1046 | """
version.jl
# Description
Borrowed from MLJ.jl, this defines the version of the package as a constant in the module.
# Authors
- Sasha Petrenko <[email protected]>
# Credits
- MLJ.jl: https://github.com/alan-turing-institute/MLJ.jl
"""
# -----------------------------------------------------------------------------
# DEPENDENCIES
# -----------------------------------------------------------------------------
using Pkg
# -----------------------------------------------------------------------------
# CONSTANTS
# -----------------------------------------------------------------------------
"""
A constant that contains the version of the installed AdaptiveResonance.jl package.
This value is computed at compile time, so it may be used to programmatically verify the version of `AdaptiveResonance` that is installed in case a `compat` entry in your Project.toml is missing or otherwise incorrect.
"""
const ADAPTIVERESONANCE_VERSION = VersionNumber(
Pkg.TOML.parsefile(joinpath(dirname(@__DIR__), "Project.toml"))["version"]
)
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 941 | """
ART.jl

# Description
Includes all of the unsupervised ART module definitions.
"""
# -----------------------------------------------------------------------------
# INCLUDES
# -----------------------------------------------------------------------------
# Common code for all ART modules
include("common.jl")
# Single (not distributed) ART modules
include("single/single.jl")
# Distributed ART modules
# NOTE(review): DDVFA uses FuzzyART modules as F2 nodes, so the single modules
# presumably must be included before the distributed ones — verify before reordering.
include("distributed/distributed.jl")
# Convenience constructors of variants of ART modules
include("variants.jl")
# -----------------------------------------------------------------------------
# AGGREGATIONS
# -----------------------------------------------------------------------------
"""
A list of (default) unsupervised ART modules that are available in the `AdaptiveResonance.jl` package.
"""
const ART_MODULES = [
# Core modules
FuzzyART,
DVFA,
DDVFA,
# Variants
GammaNormalizedFuzzyART,
]
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 3526 | """
common.jl
# Description:
Includes all of the unsupervised ART modules common code.
"""
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
"""
Train the ART model on a batch of data 'x' with optional supervisory labels 'y.'

# Arguments
- `art::ART`: the unsupervised ART model to train.
- `x::RealMatrix`: the 2-D dataset containing columns of samples with rows of features.
- `y::IntegerVector=Int[]`: optional, labels for simple supervisory training.
- `preprocessed::Bool=false`: optional, flag if the data has already been complement coded or not.
"""
function train!(art::ART, x::RealMatrix ; y::IntegerVector = Int[], preprocessed::Bool=false)
    # Report the training status if the display option is enabled
    if art.opts.display
        @info "Training $(typeof(art))"
    end
    # Simple supervised mode is enabled whenever labels are provided
    has_labels = !isempty(y)
    # Infer the number of samples and set up the data for batch training
    num_samples = get_n_samples(x)
    x = init_train!(x, art, preprocessed)
    # Preallocate the vector of output labels
    labels_out = zeros(Int, num_samples)
    # Train epoch-by-epoch until the module's stopping conditions are met
    art.epoch = 0
    keep_training = true
    while keep_training
        # Increment the epoch and get a fresh iterator (and optional progress bar)
        art.epoch += 1
        epoch_iter = get_iterator(art.opts, num_samples)
        for ix = epoch_iter
            # Refresh the iterator display if necessary
            update_iter(art, epoch_iter, ix)
            # Extract a single sample (feature column)
            one_sample = get_sample(x, ix)
            # Pass the provided label in supervised mode, 0 otherwise
            sample_label = has_labels ? y[ix] : 0
            # Incremental training on the already-preprocessed sample
            labels_out[ix] = train!(art, one_sample, y=sample_label, preprocessed=true)
        end
        # Continue only while the stopping conditions are not satisfied
        keep_training = !stopping_conditions(art)
    end
    return labels_out
end
# """
# Checks the stopping conditions for an ART module.
# # Arguments
# - `art::ART`: the ART module to check stopping conditions for.
# """
# function stopping_conditions(art::ART)
# return art.epoch >= art.opts.max_epoch
# end
# -----------------------------------------------------------------------------
# COMMON DOCUMENTATION
# -----------------------------------------------------------------------------
@doc raw"""
Train the ART model on a single sample of features 'x' with an optional supervisory label.

# Arguments
- `art::ART`: the unsupervised ART model to train.
- `x::RealVector`: the single sample feature vector to train upon.
- `y::Integer=0`: optional, a label for simple supervisory training.
- `preprocessed::Bool=false`: optional, flag if the data has already been complement coded or not.
"""
train!(art::ART, x::RealVector ; y::Integer=0, preprocessed::Bool=false)
@doc raw"""
Initializes the ART module for training with sample 'x' and optional label 'y', setting up the data configuration and instantiating the first category.

This function is used during the first training iteration when the ART module is empty.

# Arguments
- `art::ART`: the ART module to initialize.
- `x::RealVector`: the sample to use for initialization.
- `y::Integer=0`: the optional new label for the first weight of the ART module. If not specified, defaults the new label to 1.

# Examples
```julia-repl
julia> my_FuzzyART = FuzzyART()
FuzzyART
    opts: opts_FuzzyART
    ...
julia> initialize!(my_FuzzyART, [1, 2, 3, 4])
```
"""
initialize!(art::ART, x::RealVector ; y::Integer=0)
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1769 | """
variants.jl

# Description
Includes convenience constructors for common variants of various ART modules.
"""
# -----------------------------------------------------------------------------
# GAMMA-NORMALIZED FUZZYART
# -----------------------------------------------------------------------------
# Shared docstring fragment, interpolated into each GammaNormalizedFuzzyART docstring below
const _VARIANT_STATEMENT_GammaNormalizedFuzzyART = """
GammaNormalizedFuzzyART is a variant of [`FuzzyART`](@ref), using the [`AdaptiveResonance.opts_FuzzyART`](@ref) options.
This constructor passes `gamma_normalization=true`, which internally uses `match=:gamma_match` and `activation=:gamma_activation` in addition to the keyword argument options you provide.
"""
"""
Constructs a Gamma-Normalized FuzzyART module as a variant of FuzzyART by using the gamma_normalization option.

$(_VARIANT_STATEMENT_GammaNormalizedFuzzyART)

# Arguments
- `kwargs`: keyword arguments of FuzzyART options (see [`AdaptiveResonance.opts_FuzzyART`](@ref))
"""
function GammaNormalizedFuzzyART(;kwargs...)
    # Forward every user-provided keyword option while forcing gamma normalization on
    return FuzzyART(; gamma_normalization=true, kwargs...)
end
"""
Implements a Gamma-Normalized FuzzyART module with specified options.

$(_VARIANT_STATEMENT_GammaNormalizedFuzzyART)

# Arguments
- `opts::opts_FuzzyART`: the Fuzzy ART options (see [`AdaptiveResonance.opts_FuzzyART`](@ref)).
"""
function GammaNormalizedFuzzyART(opts::opts_FuzzyART)
    # BUGFIX: this previously constructed an SFAM module from FuzzyART options.
    # A Gamma-Normalized FuzzyART is a FuzzyART built from the provided options.
    return FuzzyART(opts)
end
"""
Implements a Gamma-Normalized FuzzyART module's options.

$(_VARIANT_STATEMENT_GammaNormalizedFuzzyART)

$(_OPTS_DOCSTRING)
"""
function opts_GammaNormalizedFuzzyART(;kwargs...)
    # Consistency fix: mirror the GammaNormalizedFuzzyART constructor and the shared
    # variant statement by forcing `gamma_normalization=true` (the previous
    # `choice_by_difference=true` did not match the documented variant behavior).
    return opts_FuzzyART(; gamma_normalization=true, kwargs...)
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 106 | """
common.jl
# Description
Contains all common code for distributed ART modules, such as DDVFA.
"""
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 475 | """
distributed.jl
# Description
Aggregates common code and all modules for distributed ART modules, such as DDVFA.
"""
# -----------------------------------------------------------------------------
# DEPENDENCIES
# -----------------------------------------------------------------------------
# Common code for distributed ART modules
include("common.jl")
# Distributed Dual-Vigilance FuzzyART
include("modules/DDVFA.jl")
# MergeART
include("modules/MergeART.jl")
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 17061 | """
DDVFA.jl
# Description
Includes all of the structures and logic for running a Distributed Dual-Vigilance Fuzzy ART (DDVFA) module.
# References
1. L. E. Brito da Silva, I. Elnabarawy, and D. C. Wunsch, 'Distributed dual vigilance fuzzy adaptive resonance theory learns online, retrieves arbitrarily-shaped clusters, and mitigates order dependence,' Neural Networks, vol. 121, pp. 208-228, 2020, doi: 10.1016/j.neunet.2019.08.033.
2. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
# -----------------------------------------------------------------------------
# OPTIONS
# -----------------------------------------------------------------------------
"""
Distributed Dual Vigilance Fuzzy ART options struct.

$(_OPTS_DOCSTRING)
"""
@with_kw mutable struct opts_DDVFA <: ARTOpts @deftype Float
    """
    Lower-bound vigilance parameter: rho_lb ∈ [0, 1].
    """
    rho_lb = 0.7; @assert rho_lb >= 0.0 && rho_lb <= 1.0

    """
    Upper bound vigilance parameter: rho_ub ∈ [0, 1].
    """
    rho_ub = 0.85; @assert rho_ub >= 0.0 && rho_ub <= 1.0

    """
    Choice parameter: alpha > 0.
    """
    alpha = 1e-3; @assert alpha > 0.0

    """
    Learning parameter: beta ∈ (0, 1].
    """
    beta = 1.0; @assert beta > 0.0 && beta <= 1.0

    """
    Pseudo kernel width: gamma >= 1.
    """
    gamma = 3.0; @assert gamma >= 1.0

    """
    Reference gamma for normalization: 0 <= gamma_ref < gamma.
    """
    gamma_ref = 1.0; @assert 0.0 <= gamma_ref && gamma_ref < gamma

    """
    Similarity method (activation and match): similarity ∈ [:single, :average, :complete, :median, :weighted, :centroid].
    """
    similarity::Symbol = :single

    """
    Maximum number of epochs during training: max_epoch ∈ [1, Inf).
    """
    max_epoch::Int = 1

    """
    Display flag for progress bars.
    """
    display::Bool = false

    """
    Flag to normalize the threshold by the feature dimension.
    """
    gamma_normalization::Bool = true

    """
    Flag to use an uncommitted node when learning.

    If true, new weights are created with ones(dim) and learn on the complement-coded sample.
    If false, fast-committing is used where the new weight is simply the complement-coded sample.
    """
    uncommitted::Bool = false

    """
    Selected activation function.
    """
    activation::Symbol = :gamma_activation

    """
    Selected match function.
    """
    match::Symbol = :gamma_match

    """
    Selected weight update function.
    """
    update::Symbol = :basic_update

    """
    Flag to sort the F2 nodes by activation before the match phase.

    When true, the F2 nodes are sorted by activation before match.
    When false, an iterative argmax and inhibition procedure is used to find the best-matching unit.
    """
    sort::Bool = false
end
# -----------------------------------------------------------------------------
# STRUCTS
# -----------------------------------------------------------------------------
"""
Distributed Dual Vigilance Fuzzy ARTMAP module struct.

For module options, see [`AdaptiveResonance.opts_DDVFA`](@ref).

# References
1. L. E. Brito da Silva, I. Elnabarawy, and D. C. Wunsch, 'Distributed dual vigilance fuzzy adaptive resonance theory learns online, retrieves arbitrarily-shaped clusters, and mitigates order dependence,' Neural Networks, vol. 121, pp. 208-228, 2020, doi: 10.1016/j.neunet.2019.08.033.
2. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
mutable struct DDVFA <: ART
    # Option Parameters
    """
    DDVFA options struct.
    """
    opts::opts_DDVFA

    """
    FuzzyART options struct used for all F2 nodes.
    """
    subopts::opts_FuzzyART

    """
    Data configuration struct.
    """
    config::DataConfig

    # Working variables
    """
    Operating module threshold value, a function of the vigilance parameter.
    """
    threshold::Float

    """
    List of F2 nodes (themselves FuzzyART modules).
    """
    F2::Vector{FuzzyART}

    """
    Incremental list of labels corresponding to each F2 node, self-prescribed or supervised.
    """
    labels::ARTVector{Int}

    """
    Number of total categories (i.e., F2 nodes).
    """
    n_categories::Int

    """
    Current training epoch.
    """
    epoch::Int

    """
    DDVFA activation values, one per F2 node.
    """
    T::ARTVector{Float}

    """
    DDVFA match values, one per F2 node.
    """
    M::ARTVector{Float}

    """
    Runtime statistics for the module, implemented as a dictionary containing entries at the end of each training iteration.

    These entries include the best-matching unit index and the activation and match values of the winning node.
    """
    stats::ARTStats
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
"""
Implements a DDVFA learner with optional keyword arguments.

# Arguments
- `kwargs`: keyword arguments to pass to the DDVFA options struct (see [`AdaptiveResonance.opts_DDVFA`](@ref).)

# Examples
By default:
```julia-repl
julia> DDVFA()
DDVFA
    opts: opts_DDVFA
    subopts: opts_FuzzyART
    ...
```

or with keyword arguments:
```julia-repl
julia> DDVFA(rho_lb=0.4, rho_ub = 0.75)
DDVFA
    opts: opts_DDVFA
    subopts: opts_FuzzyART
    ...
```
"""
function DDVFA(;kwargs...)
    # Build an options struct from the keyword arguments and delegate construction
    return DDVFA(opts_DDVFA(;kwargs...))
end
"""
Implements a DDVFA learner with specified options.

# Arguments
- `opts::opts_DDVFA`: the DDVFA options (see [`AdaptiveResonance.opts_DDVFA`](@ref)).

# Examples
```julia-repl
julia> my_opts = opts_DDVFA()
julia> DDVFA(my_opts)
DDVFA
    opts: opts_DDVFA
    subopts: opts_FuzzyART
    ...
```
"""
function DDVFA(opts::opts_DDVFA)
    # Derive the FuzzyART options shared by every F2 node from the DDVFA options
    subopts = opts_FuzzyART(
        rho=opts.rho_ub,
        gamma=opts.gamma,
        gamma_ref=opts.gamma_ref,
        gamma_normalization=opts.gamma_normalization,
        uncommitted=opts.uncommitted,
        display=false,
        activation=opts.activation,
        match=opts.match,
        update=opts.update,
        sort=opts.sort,
    )

    # Assemble an empty, untrained DDVFA module
    return DDVFA(
        opts,                           # opts
        subopts,                        # subopts
        DataConfig(),                   # config
        0.0,                            # threshold
        Vector{FuzzyART}(undef, 0),     # F2
        ARTVector{Int}(undef, 0),       # labels
        0,                              # n_categories
        0,                              # epoch
        ARTVector{Float}(undef, 0),     # T
        ARTVector{Float}(undef, 0),     # M
        build_art_stats(),              # stats
    )
end
# -----------------------------------------------------------------------------
# COMMON FUNCTIONS
# -----------------------------------------------------------------------------
# COMMON DOC: Set threshold function
function set_threshold!(art::DDVFA)
    # With gamma normalization the learning threshold scales with the data
    # dimension; otherwise it is the lower-bound vigilance parameter itself.
    art.threshold = if art.opts.gamma_normalization
        art.opts.rho_lb * (art.config.dim ^ art.opts.gamma_ref)
    else
        art.opts.rho_lb
    end
end
# COMMON DOC: DDVFA incremental training method
function train!(art::DDVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=false)
    # Flag for if training in supervised mode (a nonzero label was provided)
    supervised = !iszero(y)
    # Run the sequential initialization procedure (setup config, complement code if needed)
    sample = init_train!(x, art, preprocessed)
    # Initialization: if no F2 nodes exist yet, commit the first category and return
    if isempty(art.F2)
        # Set the threshold
        set_threshold!(art)
        # Set the first label as either 1 or the first provided label
        y_hat = supervised ? y : 1
        # Create a new category
        create_category!(art, sample, y_hat)
        return y_hat
    end
    # Default to mismatch
    mismatch_flag = true
    y_hat = -1
    # Compute the activation for all categories
    accommodate_vector!(art.T, art.n_categories)
    for jx = 1:art.n_categories
        # Update the F2 node's internal activation/match values for this sample
        activation_match!(art.F2[jx], sample)
        # Reduce the node's per-weight values to one activation via the linkage method
        art.T[jx] = similarity(art.opts.similarity, art.F2[jx], sample, true)
    end
    # Compute the match for each category in the order of greatest activation
    if art.opts.sort
        # Precompute the descending-activation visiting order
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        # Iterative argmax-and-inhibit strategy; remember the initial winner
        top_bmu = argmax(art.T)
    end
    accommodate_vector!(art.M, art.n_categories)
    for jx = 1:art.n_categories
        # Best matching unit for this iteration
        if art.opts.sort
            bmu = index[jx]
        else
            bmu = argmax(art.T)
        end
        # Compute the match with the similarity linkage method
        art.M[bmu] = similarity(art.opts.similarity, art.F2[bmu], sample, false)
        # If we got a match, then learn (update the category)
        if art.M[bmu] >= art.threshold
            # If supervised and the label differs, trigger mismatch (forces a new category)
            if supervised && (art.labels[bmu] != y)
                break
            end
            # Update the weights of the resonant F2 node with the sample
            train!(art.F2[bmu], sample, preprocessed=true)
            # Save the output label for the sample
            y_hat = art.labels[bmu]
            # No mismatch
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Inhibit the current winner so the next argmax finds the runner-up
            art.T[bmu] = 0.0
        end
    end
    # If we triggered a mismatch, commit a new F2 node
    if mismatch_flag
        # Keep the bmu as the top activation despite creating a new category
        bmu = top_bmu
        # Get the correct label: supervised label or the next free cluster index
        y_hat = supervised ? y : art.n_categories + 1
        # Create a new category
        create_category!(art, sample, y_hat)
    end
    # Log the stats (winning/BMU index and mismatch state)
    log_art_stats!(art, bmu, mismatch_flag)
    return y_hat
end
# COMMON DOC: DDVFA incremental classification method
function classify(art::DDVFA, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false)
    # Preprocess the data (complement code if not already preprocessed)
    sample = init_classify!(x, art, preprocessed)
    # Calculate all global activations
    accommodate_vector!(art.T, art.n_categories)
    for jx = 1:art.n_categories
        # Update the F2 node's activation and match values
        activation_match!(art.F2[jx], sample)
        # Update the DDVFA activation with the similarity linkage method
        art.T[jx] = similarity(art.opts.similarity, art.F2[jx], sample, true)
    end
    # Sort by highest activation (or prepare the iterative argmax strategy)
    if art.opts.sort
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        top_bmu = argmax(art.T)
    end
    # Default to mismatch
    mismatch_flag = true
    y_hat = -1
    # Iterate over the list of activations in descending order
    accommodate_vector!(art.M, art.n_categories)
    for jx = 1:art.n_categories
        # Get the best-matching unit for this iteration
        if art.opts.sort
            bmu = index[jx]
        else
            bmu = argmax(art.T)
        end
        # Get the match value of this activation
        art.M[bmu] = similarity(art.opts.similarity, art.F2[bmu], sample, false)
        # If the match satisfies the threshold criterion, then report that label
        if art.M[bmu] >= art.threshold
            # Update the stored match and activation values
            log_art_stats!(art, bmu, false)
            # Current winner's label is the prediction
            y_hat = art.labels[bmu]
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Inhibit the current winner so the next argmax finds the runner-up
            art.T[bmu] = 0.0
        end
    end
    # If we did not find a resonant category
    if mismatch_flag
        # Update the stored match and activation values of the best matching unit
        bmu = top_bmu
        log_art_stats!(art, bmu, true)
        # Report either the best matching unit or the mismatch label -1
        y_hat = get_bmu ? art.labels[bmu] : -1
    end
    return y_hat
end
# -----------------------------------------------------------------------------
# INTERNAL FUNCTIONS
# -----------------------------------------------------------------------------
"""
Create a new category by appending and initializing a new FuzzyART node to F2.

# Arguments
- `art::DDVFA`: the DDVFA module to create a new FuzzyART category in.
- `sample::RealVector`: the sample to use for instantiating the new category.
- `label::Integer`: the new label to use for the new category.
"""
function create_category!(art::DDVFA, sample::RealVector, label::Integer)
    # Register the new global category and its label
    art.n_categories += 1
    push!(art.labels, label)
    # Commit a fresh local FuzzyART node on the (already preprocessed) sample
    push!(art.F2, FuzzyART(art.subopts, sample, preprocessed=true))
end
# -----------------------------------------------------------------------------
# DDVFA LINKAGE METHODS
# -----------------------------------------------------------------------------
# Argument docstring for the activation flag
const ACTIVATION_DOCSTRING = """
- `activation::Bool`: flag to use the activation function. False uses the match function.
"""
# Argument docstring for the sample vector
const SAMPLE_DOCSTRING = """
- `sample::RealVector`: the sample to use for computing the linkage to the F2 module.
"""
# Argument docstring for the F2 docstring
const F2_DOCSTRING = """
- `F2::FuzzyART`: the DDVFA FuzzyART F2 node to compute the linkage method within.
"""
# Argument docstring for the F2 field, includes the argument header
const FIELD_DOCSTRING = """
# Arguments
- `field::RealVector`: the DDVFA FuzzyART F2 node field (F2.T or F2.M) to compute the linkage for.
"""
"""
Compute the similarity metric depending on method with explicit comparisons for the field name.

# Arguments
- `method::Symbol`: the linkage method to use.
$F2_DOCSTRING
$SAMPLE_DOCSTRING
$ACTIVATION_DOCSTRING
"""
function similarity(method::Symbol, F2::FuzzyART, sample::RealVector, activation::Bool)
    # Dispatch explicitly on the linkage symbol instead of `eval`-ing it:
    # runtime `eval` is slow and subject to world-age issues inside compiled code,
    # and the docstring already promises explicit comparisons.
    if method === :centroid
        # Centroid linkage needs the sample itself
        value = centroid(F2, sample, activation)
    elseif method === :weighted
        # Weighted linkage needs the F2 node's instance counts
        value = weighted(F2, activation)
    elseif method === :single
        value = single(activation ? F2.T : F2.M)
    elseif method === :average
        value = average(activation ? F2.T : F2.M)
    elseif method === :complete
        value = complete(activation ? F2.T : F2.M)
    elseif method === :median
        value = median(activation ? F2.T : F2.M)
    else
        # Fail loudly on unknown linkage methods (see DDVFA_METHODS)
        throw(ArgumentError("Invalid DDVFA similarity method: $(method)"))
    end
    return value
end
"""
A list of all DDVFA similarity linkage methods.
"""
const DDVFA_METHODS = [
:single,
:average,
:complete,
:median,
:weighted,
:centroid,
]
"""
Single linkage DDVFA similarity function.

$FIELD_DOCSTRING
"""
function single(field::RealVector)
    # Single linkage: the largest value across the F2 node's field
    return reduce(max, field)
end
"""
Average linkage DDVFA similarity function.

$FIELD_DOCSTRING
"""
function average(field::RealVector)
    # Arithmetic mean of the field values
    return sum(field) / length(field)
end
"""
Complete linkage DDVFA similarity function.

$FIELD_DOCSTRING
"""
function complete(field::RealVector)
    # Complete linkage: the smallest value across the F2 node's field
    return reduce(min, field)
end
"""
Median linkage DDVFA similarity function.

$FIELD_DOCSTRING
"""
function median(field::RealVector)
    # Delegate to the Statistics stdlib median (imported under the
    # statistics_median alias to avoid clashing with this local `median`)
    return statistics_median(field)
end
"""
Weighted linkage DDVFA similarity function.

# Arguments:
$F2_DOCSTRING
$ACTIVATION_DOCSTRING
"""
function weighted(F2::FuzzyART, activation::Bool)
    # Relative instance counts act as the weights of the convex combination
    relative_counts = F2.n_instance ./ sum(F2.n_instance)
    # Select the activation or match field depending on the flag
    field = activation ? F2.T : F2.M
    # Weighted sum of the selected field
    return field' * relative_counts
end
"""
Centroid linkage DDVFA similarity function.

# Arguments:
$F2_DOCSTRING
$SAMPLE_DOCSTRING
$ACTIVATION_DOCSTRING
"""
function centroid(F2::FuzzyART, sample::RealVector, activation::Bool)
    # The element-wise minimum across all of the node's weights is its centroid weight
    Wc = vec(minimum(F2.W, dims=2))
    # L1 norm of the centroid weight, reused by both branches
    norm_Wc = norm(Wc, 1)
    # Gamma-normalized activation of the sample against the centroid weight
    T = norm(element_min(sample, Wc), 1) / (F2.opts.alpha + norm_Wc)^F2.opts.gamma
    # The match variant rescales by the centroid norm raised to the reference gamma
    return activation ? T : (norm_Wc^F2.opts.gamma_ref) * T
end
# -----------------------------------------------------------------------------
# CONVENIENCE METHODS
# -----------------------------------------------------------------------------
"""
Convenience function; return a concatenated array of all DDVFA weights.

# Arguments
- `art::DDVFA`: the DDVFA module to get all of the weights from as a list.
"""
function get_W(art::DDVFA)
    # Collect the weight matrix of every F2 node into one list
    return map(kx -> art.F2[kx].W, 1:art.n_categories)
end
"""
Convenience function; return the number of weights in each category as a vector.

# Arguments
- `art::DDVFA`: the DDVFA module to get all of the weights from as a list.
"""
function get_n_weights_vec(art::DDVFA)
    # One entry per F2 node: the number of local weights (categories) it holds
    return map(jx -> art.F2[jx].n_categories, 1:art.n_categories)
end
"""
Convenience function; return the sum total number of weights in the DDVFA module.
"""
function get_n_weights(art::DDVFA)
    # Fold the per-node weight counts into a single total
    return reduce(+, get_n_weights_vec(art); init=0)
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 4876 | """
MergeART.jl
# Description
Includes all of the structures and logic for running a MergeART module.
# References
1. L. E. Brito da Silva, I. Elnabarawy, and D. C. Wunsch, 'Distributed dual vigilance fuzzy adaptive resonance theory learns online, retrieves arbitrarily-shaped clusters, and mitigates order dependence,' Neural Networks, vol. 121, pp. 208-228, 2020, doi: 10.1016/j.neunet.2019.08.033.
2. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
MergeART options struct.

$(_OPTS_DOCSTRING)
"""
@with_kw mutable struct opts_MergeART <: ARTOpts @deftype Float
    """
    Lower-bound vigilance parameter: rho_lb ∈ [0, 1].
    """
    rho_lb = 0.7; @assert rho_lb >= 0.0 && rho_lb <= 1.0

    """
    Upper bound vigilance parameter: rho_ub ∈ [0, 1].
    """
    rho_ub = 0.85; @assert rho_ub >= 0.0 && rho_ub <= 1.0

    """
    Choice parameter: alpha > 0.
    """
    alpha = 1e-3; @assert alpha > 0.0

    """
    Learning parameter: beta ∈ (0, 1].
    """
    beta = 1.0; @assert beta > 0.0 && beta <= 1.0

    """
    Pseudo kernel width: gamma >= 1.
    """
    gamma = 3.0; @assert gamma >= 1.0

    """
    Reference gamma for normalization: 0 <= gamma_ref < gamma.
    """
    gamma_ref = 1.0; @assert 0.0 <= gamma_ref && gamma_ref < gamma

    """
    Similarity method (activation and match): similarity ∈ [:single, :average, :complete, :median, :weighted, :centroid].
    """
    similarity::Symbol = :single

    """
    Maximum number of epochs during training: max_epoch ∈ [1, Inf).
    """
    max_epoch::Int = 1

    """
    Display flag for progress bars.
    """
    display::Bool = false

    """
    Flag to normalize the threshold by the feature dimension.
    """
    gamma_normalization::Bool = true

    """
    Flag to use an uncommitted node when learning.

    If true, new weights are created with ones(dim) and learn on the complement-coded sample.
    If false, fast-committing is used where the new weight is simply the complement-coded sample.
    """
    uncommitted::Bool = false

    """
    Selected activation function.
    """
    activation::Symbol = :gamma_activation

    """
    Selected match function.
    """
    match::Symbol = :gamma_match

    """
    Selected weight update function.
    """
    update::Symbol = :basic_update

    """
    Flag to sort the F2 nodes by activation before the match phase.

    When true, the F2 nodes are sorted by activation before match.
    When false, an iterative argmax and inhibition procedure is used to find the best-matching unit.
    """
    sort::Bool = false
end
"""
MergeART module struct.

For module options, see [`AdaptiveResonance.opts_MergeART`](@ref).

# References
1. L. E. Brito da Silva, I. Elnabarawy, and D. C. Wunsch, 'Distributed dual vigilance fuzzy adaptive resonance theory learns online, retrieves arbitrarily-shaped clusters, and mitigates order dependence,' Neural Networks, vol. 121, pp. 208-228, 2020, doi: 10.1016/j.neunet.2019.08.033.
2. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
mutable struct MergeART <: ART
    # Option Parameters
    """
    DDVFA options struct.

    NOTE(review): this field is typed `opts_DDVFA` even though `opts_MergeART` is
    defined above and otherwise unused here — confirm whether this reuse is intentional.
    """
    opts::opts_DDVFA

    """
    FuzzyART options struct used for all F2 nodes.
    """
    subopts::opts_FuzzyART

    """
    Data configuration struct.
    """
    config::DataConfig

    # Working variables
    """
    Operating module threshold value, a function of the vigilance parameter.
    """
    threshold::Float

    """
    List of F2 nodes (themselves FuzzyART modules).
    """
    F2::Vector{FuzzyART}

    """
    Incremental list of labels corresponding to each F2 node, self-prescribed or supervised.
    """
    labels::ARTVector{Int}

    """
    Number of total categories.
    """
    n_categories::Int

    """
    Current training epoch.
    """
    epoch::Int

    """
    DDVFA activation values.

    NOTE(review): unlike DDVFA, the element type is left abstract (`ARTVector` vs
    `ARTVector{Float}`) — confirm whether this is intentional.
    """
    T::ARTVector

    """
    DDVFA match values.

    NOTE(review): element type left abstract here as well — see note on `T`.
    """
    M::ARTVector

    """
    Runtime statistics for the module, implemented as a dictionary containing entries at the end of each training iteration.

    These entries include the best-matching unit index and the activation and match values of the winning node.
    """
    stats::ARTStats
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
# UNIMPLEMENTED
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 2599 | """
common.jl
# Description
Contains all common code for single ART modules (i.e. not distributed models).
"""
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
Abstract supertype of FuzzyART modules.
"""
abstract type AbstractFuzzyART <: ART end
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
# COMMON DOC: AbstractFuzzyART initialization function
function initialize!(art::AbstractFuzzyART, x::RealVector ; y::Integer=0)
    # Compute the operating threshold from the options and data configuration
    set_threshold!(art)
    # Start with an empty weight matrix of the complement-coded feature dimension
    art.W = ARTMatrix{Float}(undef, art.config.dim_comp, 0)
    # Use the supervised label if one was given, otherwise default to 1
    first_label = iszero(y) ? 1 : y
    # Commit the first category on the initialization sample
    create_category!(art, x, first_label)
end
"""
Computes the activation and match functions of the ART module against sample x.

# Arguments
- `art::AbstractFuzzyART`: the single FuzzyART module to compute the activation and match values for all weights.
- `x::RealVector`: the sample to compute the activation and match functions against.

# Examples
```julia-repl
julia> my_FuzzyART = FuzzyART()
FuzzyART
    opts: opts_FuzzyART
    ...
julia> x = rand(3, 10)
julia> train!(my_FuzzyART, x)
julia> activation_match!(my_FuzzyART, x[:, 1])
```
"""
function activation_match!(art::AbstractFuzzyART, x::RealVector)
    # Make sure the destination activation and match caches hold one value per category
    accommodate_vector!(art.T, art.n_categories)
    accommodate_vector!(art.M, art.n_categories)
    # With full gamma normalization, the match can reuse the activation computation
    reuse_activation = (art.opts.match == :gamma_match) && (art.opts.activation == :gamma_activation)
    for jx = 1:art.n_categories
        art.T[jx] = art_activation(art, x, jx)
        art.M[jx] = reuse_activation ? art_match(art, x, jx, art.T[jx]) : art_match(art, x, jx)
    end
end
"""
In place learning function.

# Arguments
- `art::AbstractFuzzyART`: the FuzzyART module to update.
- `x::RealVector`: the sample to learn from.
- `index::Integer`: the index of the FuzzyART weight to update.
"""
function learn!(art::AbstractFuzzyART, x::RealVector, index::Integer)
    # Compute the updated weight and write it into the weight matrix in place
    replace_mat_index!(art.W, art_learn(art, x, index), index)
    # Return empty
    return
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 453 | """
single.jl
# Description
Aggregates the common code and all the modules of single (i.e. not distributed) ART modules.
"""
# -----------------------------------------------------------------------------
# INCLUDES
# -----------------------------------------------------------------------------
# Single ART module common code
include("common.jl")
# FuzzyART
include("modules/FuzzyART.jl")
# Dual-vigilance FuzzyART
include("modules/DVFA.jl")
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 11284 | """
DVFA.jl
# Description
Includes all of the structures and logic for running a Dual-Vigilance Fuzzy ART (DVFA) module.
# Authors
- MATLAB implementation: Leonardo Enzo Brito da Silva
- Julia port: Sasha Petrenko <[email protected]>
# References
1. L. E. Brito da Silva, I. Elnabarawy and D. C. Wunsch II, 'Dual Vigilance Fuzzy ART,' Neural Networks Letters. To appear.
2. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
Dual Vigilance Fuzzy ART options struct.

$(_OPTS_DOCSTRING)
"""
@with_kw mutable struct opts_DVFA <: ARTOpts @deftype Float
    """
    Lower-bound vigilance parameter: rho_lb ∈ [0, 1].
    """
    rho_lb = 0.55; @assert rho_lb >= 0.0 && rho_lb <= 1.0

    """
    Upper bound vigilance parameter: rho_ub ∈ [0, 1].
    """
    rho_ub = 0.75; @assert rho_ub >= 0.0 && rho_ub <= 1.0

    """
    Choice parameter: alpha > 0.
    """
    alpha = 1e-3; @assert alpha > 0.0

    """
    Learning parameter: beta ∈ (0, 1].
    """
    beta = 1.0; @assert beta > 0.0 && beta <= 1.0

    """
    Maximum number of epochs during training.
    """
    max_epoch::Int = 1

    """
    Display flag for progress bars.
    """
    display::Bool = false

    """
    Flag to use an uncommitted node when learning.

    If true, new weights are created with ones(dim) and learn on the complement-coded sample.
    If false, fast-committing is used where the new weight is simply the complement-coded sample.
    """
    uncommitted::Bool = false

    """
    Selected activation function.
    """
    activation::Symbol = :basic_activation

    """
    Selected match function.
    """
    match::Symbol = :unnormalized_match

    """
    Selected weight update function.
    """
    update::Symbol = :basic_update

    """
    Flag to sort the F2 nodes by activation before the match phase.

    When true, the F2 nodes are sorted by activation before match.
    When false, an iterative argmax and inhibition procedure is used to find the best-matching unit.
    """
    sort::Bool = false
end
"""
Dual Vigilance Fuzzy ARTMAP module struct.

For module options, see [`AdaptiveResonance.opts_DVFA`](@ref).

# References:
1. L. E. Brito da Silva, I. Elnabarawy and D. C. Wunsch II, 'Dual Vigilance Fuzzy ART,' Neural Networks Letters. To appear.
2. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
mutable struct DVFA <: AbstractFuzzyART
    # Get parameters
    """
    DVFA options struct.
    """
    opts::opts_DVFA

    """
    Data configuration struct.
    """
    config::DataConfig

    # Working variables
    """
    Operating upper bound module threshold value, a function of the upper bound vigilance parameter.
    """
    threshold_ub::Float

    """
    Operating lower bound module threshold value, a function of the lower bound vigilance parameter.
    """
    threshold_lb::Float

    """
    Incremental list of labels corresponding to each F2 node, self-prescribed or supervised.
    """
    labels::ARTVector{Int}

    """
    Category weight matrix.
    """
    W::ARTMatrix{Float}

    """
    Activation values for every weight for a given sample.
    """
    T::ARTVector{Float}

    """
    Match values for every weight for a given sample.
    """
    M::ARTVector{Float}

    """
    Number of category weights (F2 nodes).
    """
    n_categories::Int

    """
    Number of labeled clusters, may be lower than `n_categories`.
    """
    n_clusters::Int

    """
    Current training epoch.
    """
    epoch::Int

    """
    Runtime statistics for the module, implemented as a dictionary containing entries at the end of each training iteration.

    These entries include the best-matching unit index and the activation and match values of the winning node.
    """
    stats::ARTStats
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
"""
Implements a DVFA learner with optional keyword arguments.

# Arguments
- `kwargs`: keyword arguments to pass to the DVFA options struct (see [`AdaptiveResonance.opts_DVFA`](@ref).)

# Examples
By default:
```julia-repl
julia> DVFA()
DVFA
    opts: opts_DVFA
    ...
```

or with keyword arguments:
```julia-repl
julia> DVFA(rho=0.7)
DVFA
    opts: opts_DVFA
    ...
```
"""
function DVFA(;kwargs...)
    # Build an options struct from the keyword arguments and delegate construction
    return DVFA(opts_DVFA(;kwargs...))
end
"""
Implements a DVFA learner with specified options.

# Arguments
- `opts::opts_DVFA`: the DVFA options (see [`AdaptiveResonance.opts_DVFA`](@ref)).

# Examples
```julia-repl
julia> my_opts = opts_DVFA()
julia> DVFA(my_opts)
DVFA
    opts: opts_DVFA
    ...
```
"""
function DVFA(opts::opts_DVFA)
    # Positional arguments follow the DVFA struct's field order.
    # (The two empty Float vectors map to the fields T then M; the previous
    # comments had these two labels swapped.)
    DVFA(
        opts,                           # opts
        DataConfig(),                   # config
        0.0,                            # threshold_ub
        0.0,                            # threshold_lb
        ARTVector{Int}(undef, 0),       # labels
        ARTMatrix{Float}(undef, 0, 0),  # W
        ARTVector{Float}(undef, 0),     # T
        ARTVector{Float}(undef, 0),     # M
        0,                              # n_categories
        0,                              # n_clusters
        0,                              # epoch
        build_art_stats(),              # stats
    )
end
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
# COMMON DOC: Set threshold function
function set_threshold!(art::DVFA)
    # Both DVFA vigilance thresholds scale with the data dimension
    art.threshold_lb = art.opts.rho_lb * art.config.dim
    art.threshold_ub = art.opts.rho_ub * art.config.dim
end
"""
Creates a new category for the DVFA modules.

# Arguments
- `art::DVFA`: the DVFA module to add a category to.
- `x::RealVector`: the sample to use for adding a category.
- `y::Integer`: the new label for the new category.
- `new_cluster::Bool=true`: optional, whether the new category also starts a new cluster.
"""
function create_category!(art::DVFA, x::RealVector, y::Integer ; new_cluster::Bool=true)
    # Increment the number of categories
    art.n_categories += 1
    # If we are creating a new cluster altogether, increment that
    new_cluster && (art.n_clusters += 1)
    # If we use an uncommitted node
    if art.opts.uncommitted
        # Add a new weight column of ones (append! grows the weight matrix by one column)
        append!(art.W, ones(art.config.dim_comp, 1))
        # Learn the uncommitted node on the sample
        learn!(art, x, art.n_categories)
    else
        # Fast commit: the new weight column is the (complement-coded) sample itself
        append!(art.W, x)
    end
    # Update sample labels
    push!(art.labels, y)
end
# COMMON DOC: Incremental DVFA training method
function train!(art::DVFA, x::RealVector ; y::Integer=0, preprocessed::Bool=false)
    # Flag for if training in supervised mode (a label of 0 means unsupervised)
    supervised = !iszero(y)
    # Run the sequential initialization procedure
    sample = init_train!(x, art, preprocessed)
    # Initialization: the very first sample creates the first category
    if isempty(art.W)
        # Set the first label as either 1 or the first provided label
        y_hat = supervised ? y : 1
        # Initialize the module with the first sample and label
        initialize!(art, sample, y=y_hat)
        # Return the selected label
        return y_hat
    end
    # If label is new, break to make new category
    if supervised && !(y in art.labels)
        create_category!(art, sample, y)
        return y
    end
    # Compute the activation and match for all categories
    activation_match!(art, sample)
    # Sort activation function values in descending order
    # NOTE: `index` is only assigned when sort=true; Julia `if` blocks do not
    # introduce scope, so it remains visible in the loop below.
    if art.opts.sort
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        top_bmu = argmax(art.T)
    end
    # Default to mismatch
    mismatch_flag = true
    # Loop over all categories
    for jx = 1:art.n_categories
        # Best matching unit: sorted order, or iterative argmax with inhibition
        if art.opts.sort
            bmu = index[jx]
        else
            bmu = argmax(art.T)
        end
        # Vigilance test upper bound: full resonance with an existing category
        if art.M[bmu] >= art.threshold_ub
            # If supervised and the label differs, trigger mismatch
            if supervised && (art.labels[bmu] != y)
                break
            end
            # Learn the sample
            learn!(art, sample, bmu)
            # Update sample label for output
            y_hat = art.labels[bmu]
            # No mismatch
            mismatch_flag = false
            break
        # Vigilance test lower bound: new category within the same cluster
        elseif art.M[bmu] >= art.threshold_lb
            # Update sample labels
            y_hat = supervised ? y : art.labels[bmu]
            # Create a new category in the same cluster
            create_category!(art, sample, y_hat, new_cluster=false)
            # No mismatch
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Inhibit the current top activation for the next argmax pass
            art.T[bmu] = 0.0
        end
    end
    # If there was no resonant category, make a new one
    if mismatch_flag
        # Keep the bmu as the top activation despite creating a new category
        bmu = top_bmu
        # Create a new category-to-cluster label
        y_hat = supervised ? y : art.n_clusters + 1
        # Create a new category
        create_category!(art, sample, y_hat)
    end
    # Update the stored match and activation values
    log_art_stats!(art, bmu, mismatch_flag)
    return y_hat
end
# COMMON DOC: Incremental DVFA classify method
function classify(art::DVFA, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false)
    # Preprocess the data
    sample = init_classify!(x, art, preprocessed)
    # Compute activation and match functions
    activation_match!(art, sample)
    # Sort activation function values in descending order
    # NOTE: `index` is only assigned when sort=true; Julia `if` blocks do not
    # introduce scope, so it remains visible in the loop below.
    if art.opts.sort
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        top_bmu = argmax(art.T)
    end
    # Default to mismatch
    mismatch_flag = true
    y_hat = -1
    # Iterate over the list of activations
    for jx in 1:art.n_categories
        # Get the best-matching unit: sorted order, or iterative argmax with inhibition
        if art.opts.sort
            bmu = index[jx]
        else
            bmu = argmax(art.T)
        end
        # Vigilance check - pass (only the upper bound is used during inference)
        if art.M[bmu] >= art.threshold_ub
            # Current winner
            y_hat = art.labels[bmu]
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Inhibit the current top activation for the next argmax pass
            art.T[bmu] = 0.0
        end
    end
    # If we did not find a resonant category
    if mismatch_flag
        # Fall back to the overall best-matching unit
        bmu = top_bmu
        # Report either the best matching unit or the mismatch label -1
        y_hat = get_bmu ? art.labels[bmu] : -1
    end
    # Update the stored match and activation values
    log_art_stats!(art, bmu, mismatch_flag)
    # Return the inferred label
    return y_hat
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 12050 | """
FuzzyART.jl
# Description
Includes all of the structures and logic for running a Gamma-Normalized Fuzzy ART module.
# Authors
- MATLAB implementation: Leonardo Enzo Brito da Silva
- Julia port: Sasha Petrenko <[email protected]>
# References
1. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
Gamma-Normalized Fuzzy ART options struct.

$(_OPTS_DOCSTRING)
"""
@with_kw mutable struct opts_FuzzyART <: ARTOpts @deftype Float
    """
    Vigilance parameter: rho ∈ [0, 1].
    """
    rho = 0.6; @assert rho >= 0.0 && rho <= 1.0

    """
    Choice parameter: alpha > 0.
    """
    alpha = 1e-3; @assert alpha > 0.0

    """
    Learning parameter: beta ∈ (0, 1].
    """
    beta = 1.0; @assert beta > 0.0 && beta <= 1.0

    """
    Pseudo kernel width: gamma >= 1.
    """
    gamma = 3.0; @assert gamma >= 1.0

    """
    Reference gamma for normalization: 0 <= gamma_ref <= gamma.
    """
    gamma_ref = 1.0; @assert 0.0 <= gamma_ref && gamma_ref <= gamma

    """
    Maximum number of epochs during training: max_epoch ∈ [1, Inf).
    """
    max_epoch::Int = 1

    """
    Display flag for progress bars.
    """
    display::Bool = false

    """
    Flag to normalize the threshold by the feature dimension.

    **NOTE**: this flag overwrites the `activation` and `match` settings here to their gamma-normalized equivalents along with adjusting the threshold.
    """
    gamma_normalization::Bool = false

    """
    Flag to use an uncommitted node when learning.

    If true, new weights are created with ones(dim) and learn on the complement-coded sample.
    If false, fast-committing is used where the new weight is simply the complement-coded sample.
    """
    uncommitted::Bool = false

    """
    Selected activation function.
    """
    activation::Symbol = :basic_activation

    """
    Selected match function.
    """
    match::Symbol = :basic_match

    """
    Selected weight update function.
    """
    update::Symbol = :basic_update

    """
    Flag to sort the F2 nodes by activation before the match phase.

    When true, the F2 nodes are sorted by activation before match.
    When false, an iterative argmax and inhibition procedure is used to find the best-matching unit.
    """
    sort::Bool = false
end
"""
Gamma-Normalized Fuzzy ART learner struct.

For module options, see [`AdaptiveResonance.opts_FuzzyART`](@ref).

# References
1. G. Carpenter, S. Grossberg, and D. Rosen, 'Fuzzy ART: Fast stable learning and categorization of analog patterns by an adaptive resonance system,' Neural Networks, vol. 4, no. 6, pp. 759-771, 1991.
"""
mutable struct FuzzyART <: AbstractFuzzyART
    """
    FuzzyART options struct.
    """
    opts::opts_FuzzyART

    """
    Data configuration struct.
    """
    config::DataConfig

    """
    Operating module threshold value, a function of the vigilance parameter.
    """
    threshold::Float

    """
    Incremental list of labels corresponding to each F2 node, self-prescribed or supervised.
    """
    labels::ARTVector{Int}

    """
    Activation values for every weight for a given sample.
    """
    T::ARTVector{Float}

    """
    Match values for every weight for a given sample.
    """
    M::ARTVector{Float}

    """
    Category weight matrix; each column is one category's weight vector (see `create_category!`).
    """
    W::ARTMatrix{Float}

    """
    Number of weights associated with each category.
    """
    n_instance::ARTVector{Int}

    """
    Number of category weights (F2 nodes).
    """
    n_categories::Int

    """
    Current training epoch.
    """
    epoch::Int

    """
    Runtime statistics for the module, implemented as a dictionary containing entries at the end of each training iteration.
    These entries include the best-matching unit index and the activation and match values of the winning node.
    """
    stats::ARTStats
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
"""
Implements a Fuzzy ART learner with optional keyword arguments.

# Arguments
- `kwargs`: keyword arguments of FuzzyART options (see [`AdaptiveResonance.opts_FuzzyART`](@ref)).

# Examples
By default:
```julia-repl
julia> FuzzyART()
FuzzyART
    opts: opts_FuzzyART
    ...
```
or with keyword arguments:
```julia-repl
julia> FuzzyART(rho=0.7)
FuzzyART
    opts: opts_FuzzyART
    ...
```
"""
function FuzzyART(;kwargs...)
    # Build the options from the keyword arguments, then the module from the options
    return FuzzyART(opts_FuzzyART(;kwargs...))
end
"""
Implements a Fuzzy ART learner with specified options.

# Arguments
- `opts::opts_FuzzyART`: the FuzzyART options struct with specified options (see [`AdaptiveResonance.opts_FuzzyART`](@ref)).

# Examples
```julia-repl
julia> FuzzyART(opts)
FuzzyART
    opts: opts_FuzzyART
    ...
```
"""
function FuzzyART(opts::opts_FuzzyART)
    # Enforce dependent options for gamma normalization
    # (overwrites the user-selected activation and match functions; see the opts docstring)
    if opts.gamma_normalization
        opts.activation = :gamma_activation
        opts.match = :gamma_match
    end
    # Construct an empty FuzzyART module
    return FuzzyART(
        opts,                           # opts
        DataConfig(),                   # config
        0.0,                            # threshold
        ARTVector{Int}(undef, 0),       # labels
        ARTVector{Float}(undef, 0),     # T
        ARTVector{Float}(undef, 0),     # M
        ARTMatrix{Float}(undef, 0, 0),  # W
        ARTVector{Int}(undef, 0),       # n_instance
        0,                              # n_categories
        0,                              # epoch
        build_art_stats(),              # stats
    )
end
"""
Create and initialize a FuzzyART with a single sample in one step.

Principally used as a method for initialization within DDVFA.

# Arguments
- `opts::opts_FuzzyART`: the FuzzyART options struct.
- `sample::RealVector`: the sample used to set up and initialize the FuzzyART.
- `preprocessed::Bool=false`: flag for if the sample is already complement coded and normalized.
"""
function FuzzyART(opts::opts_FuzzyART, sample::RealVector ; preprocessed::Bool=false)
    # Build an empty module from the options
    art = FuzzyART(opts)
    # Run the training setup for the provided sample
    init_train!(sample, art, preprocessed)
    # Create the first category from the sample
    initialize!(art, sample)
    return art
end
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
# COMMON DOC: Set threshold function
function set_threshold!(art::FuzzyART)
    # Set the normalized threshold
    if art.opts.gamma_normalization
        # Scale the vigilance by the feature dimension raised to the reference gamma
        art.threshold = art.opts.rho * (art.config.dim ^ art.opts.gamma_ref)
    # Otherwise, vigilance parameter is the threshold
    else
        art.threshold = art.opts.rho
    end
end
# COMMON DOC: create_category! function
function create_category!(art::FuzzyART, x::RealVector, y::Integer)
    # Increment the number of categories
    art.n_categories += 1
    # Increment number of samples associated with new category
    push!(art.n_instance, 1)
    # If we use an uncommitted node
    if art.opts.uncommitted
        # Add a new weight column of ones
        append!(art.W, ones(art.config.dim_comp, 1))
        # Learn the uncommitted node on the sample
        learn!(art, x, art.n_categories)
    else
        # Fast commit: the new weight is the (complement-coded) sample itself
        append!(art.W, x)
    end
    # Add the label for the category
    push!(art.labels, y)
end
# COMMON DOC: FuzzyART incremental training method
function train!(art::FuzzyART, x::RealVector ; y::Integer=0, preprocessed::Bool=false)
    # Flag for if training in supervised mode (a label of 0 means unsupervised)
    supervised = !iszero(y)
    # Run the sequential initialization procedure
    sample = init_train!(x, art, preprocessed)
    # Initialization: the very first sample creates the first category
    if isempty(art.W)
        # Set the first label as either 1 or the first provided label
        y_hat = supervised ? y : 1
        # Initialize the module with the first sample and label
        initialize!(art, sample, y=y_hat)
        # Return the selected label
        return y_hat
    end
    # If we have a new supervised category, create a new category
    if supervised && !(y in art.labels)
        create_category!(art, sample, y)
        return y
    end
    # Compute activation/match functions
    activation_match!(art, sample)
    # Sort activation function values in descending order
    # NOTE: `index` is only assigned when sort=true; Julia `if` blocks do not
    # introduce scope, so it remains visible in the loop below.
    if art.opts.sort
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        top_bmu = argmax(art.T)
    end
    # Initialize mismatch as true
    mismatch_flag = true
    y_hat = -1
    # Loop over all categories
    for jx = 1:art.n_categories
        # Best matching unit: sorted order, or iterative argmax with inhibition
        if art.opts.sort
            bmu = index[jx]
        else
            bmu = argmax(art.T)
        end
        # Vigilance check - pass
        if art.M[bmu] >= art.threshold
            # If supervised and the label differed, force mismatch
            if supervised && (art.labels[bmu] != y)
                break
            end
            # Learn the sample
            learn!(art, sample, bmu)
            # Increment the instance counting
            art.n_instance[bmu] += 1
            # Save the output label for the sample
            y_hat = art.labels[bmu]
            # No mismatch
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Inhibit the current top activation for the next argmax pass
            art.T[bmu] = 0.0
        end
    end
    # If there was no resonant category, make a new one
    if mismatch_flag
        # Keep the bmu as the top activation despite creating a new category
        bmu = top_bmu
        # Get the correct label for the new category
        y_hat = supervised ? y : art.n_categories + 1
        # Create a new category
        create_category!(art, sample, y_hat)
    end
    # Update the stored match and activation values
    log_art_stats!(art, bmu, mismatch_flag)
    # Return the training label
    return y_hat
end
# COMMON DOC: FuzzyART incremental classification method
function classify(art::FuzzyART, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false)
    # Preprocess the data
    sample = init_classify!(x, art, preprocessed)
    # Compute activation and match functions
    activation_match!(art, sample)
    # Sort activation function values in descending order
    # NOTE: `index` is only assigned when sort=true; Julia `if` blocks do not
    # introduce scope, so it remains visible in the loop below.
    if art.opts.sort
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        top_bmu = argmax(art.T)
    end
    # Default is mismatch
    mismatch_flag = true
    y_hat = -1
    # Iterate over all categories
    for jx in 1:art.n_categories
        # Get the best-matching unit: sorted order, or iterative argmax with inhibition
        if art.opts.sort
            bmu = index[jx]
        else
            bmu = argmax(art.T)
        end
        # Vigilance check - pass
        if art.M[bmu] >= art.threshold
            # Current winner
            y_hat = art.labels[bmu]
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Inhibit the current top activation for the next argmax pass
            art.T[bmu] = 0.0
        end
    end
    # If we did not find a match
    if mismatch_flag
        # Fall back to the overall best-matching unit
        bmu = top_bmu
        # Report either the best matching unit or the mismatch label -1
        y_hat = get_bmu ? art.labels[bmu] : -1
    end
    # Update the stored match and activation values
    log_art_stats!(art, bmu, mismatch_flag)
    # Return the inferred label
    return y_hat
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 917 | """
ARTMAP.jl
# Description
Includes all of the ARTMAP (i.e., explicitly supervised) ART modules definitions.
"""
# -----------------------------------------------------------------------------
# INCLUDES
# -----------------------------------------------------------------------------
# Common code for all ARTMAP modules, including common dispatches and docstrings
include("common.jl")
# FuzzyARTMAP
include("FAM.jl")
# Simplified Fuzzy ARTMAP
include("SFAM.jl")
# ARTSCENE filters
include("ARTSCENE.jl")
# ARTMAP variants
include("variants.jl")
# -----------------------------------------------------------------------------
# AGGREGATIONS
# -----------------------------------------------------------------------------
"""
A list of supervised ARTMAP modules that are available in the `AdaptiveResonance.jl` package.

Currently contains the core `SFAM` module and its `DAM` variant.
"""
const ARTMAP_MODULES = [
    # Core modules
    SFAM,
    # Variants
    DAM,
]
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 10525 | """
ARTSCENE.jl
# Description
All of the visual filter functions for the ARTSCENE algorithm.
"""
# -----------------------------------------------------------------------------
# DEPENDENCIES
# -----------------------------------------------------------------------------
using
Distributed,
SharedArrays
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
"""
ARTSCENE Stage 1: Color-to-gray image transformation.

Averages the color channels of a channel-first (color, n_row, n_column)
image into a single grayscale plane.
"""
function color_to_gray(image::Array{T, 3}) where T <: RealFP
    # The channel dimension comes first; average it away per pixel
    _, height, width = size(image)
    return [sum(view(image, :, i, j)) / 3 for i = 1:height, j = 1:width]
end
"""
Surround kernel S function for ARTSCENE Stage 2.

Isotropic 2-D Gaussian of width `scale`, centered at `(i, j)` and evaluated at `(p, q)`.
"""
function surround_kernel(i::Integer, j::Integer, p::Integer, q::Integer, scale::Integer)
    # Squared distance between the center and the evaluation point
    dist_sq = (i - p)^2 + (j - q)^2
    # Normalized Gaussian response
    return 1/(2*pi*scale^2) * MathConstants.e^(-dist_sq/(2*scale^2))
end
"""
Time rate of change of LGN network (ARTSCENE Stage 2).

# Arguments
- `x::RealArray`: the (n_row, n_column, n_g) LGN activity being settled.
- `image::RealArray`: the (n_row, n_column) grayscale input image.
- `sigma_s::RealArray`: surround kernel scales, one per scale channel g.
"""
function ddt_x(x::RealArray, image::RealArray, sigma_s::RealArray)
    # Field dimensions (size(x) is 3-D; destructuring takes the first two)
    n_row, n_column = size(x)
    n_g = length(sigma_s)
    # Half-width of the truncated surround kernel window
    kernel_r = 5
    dx = SharedArray{Float, 3}((n_row, n_column, n_g))
    # Parallelize over the scale channels
    @sync @distributed for g = 1:n_g
        for i = 1:n_row
            for j = 1:n_column
                # Clamp the kernel window to the image bounds
                # (fix: the column window is clamped by n_column, not n_row)
                kernel_h = max(1, i-kernel_r):min(n_row, i + kernel_r)
                kernel_w = max(1, j-kernel_r):min(n_column, j + kernel_r)
                # Surround kernel response to the image over the window
                S_ijg_I = sum([surround_kernel(i, j, p, q, sigma_s[g])*image[p, q]
                           for p in kernel_h, q in kernel_w])
                # Shunting equation for the enhanced contrast
                dx[i,j,g] = - x[i,j,g] + (1 - x[i,j,g])*image[i,j] - (1 + x[i,j,g])*S_ijg_I
            end
        end
    end
    return dx
end
"""
ARTSCENE Stage 2: Contrast normalization.

Settles the shunting LGN dynamics of [`ddt_x`](@ref) on the grayscale image
at several surround-kernel scales.
"""
function contrast_normalization(image::RealArray)
    # Surround kernel scale parameters
    scales = [1, 4, 8, 12]
    n_scales = length(scales)
    # Number of settling iterations for the contrast dynamics
    n_settle = 4
    # Stack one copy of the image per scale channel
    height, width = size(image)
    x = zeros(height, width, n_scales)
    for g in 1:n_scales
        x[:, :, g] .= image
    end
    # Settle the shunting dynamics
    for _ in 1:n_settle
        x += ddt_x(x, image, scales)
    end
    return x
end
"""
Oriented, elongated, spatially offset kernel G for ARTSCENE Stage 3.

# Arguments
- `i::Integer`, `j::Integer`: center position of the kernel.
- `p::Integer`, `q::Integer`: position at which the kernel is evaluated.
- `k::Integer`: orientation index; the kernel axis is rotated by k*pi/4.
- `sigma_h::Real`: horizontal (elongated) kernel width.
- `sigma_v::Real`: vertical kernel width.
- `sign::AbstractString="plus"`: offset direction, one of "plus" or "minus".

# Throws
- `ArgumentError`: if `sign` is neither "plus" nor "minus".
"""
function oriented_kernel(i::Integer, j::Integer, p::Integer, q::Integer, k::Integer, sigma_h::Real, sigma_v::Real ; sign::AbstractString="plus")
    # Offsets of the kernel center along the orientation axis
    m = sin(pi*k/4)
    n = cos(pi*k/4)
    if sign == "plus"
        G = (1/(2*pi*sigma_h*sigma_v)*
             MathConstants.e^(-0.5*((((p-i+m)*n-(q-j+n)*m)/sigma_h)^2
             +(((p-i+m)*m+(q-j+n)*n)/sigma_v)^2)))
    elseif sign == "minus"
        G = (1/(2*pi*sigma_h*sigma_v)*
             MathConstants.e^(-0.5*((((p-i-m)*n-(q-j-n)*m)/sigma_h)^2
             +(((p-i-m)*m+(q-j-n)*n)/sigma_v)^2)))
    else
        # Fix: throw a proper exception type instead of a raw string
        throw(ArgumentError("sign must be \"plus\" or \"minus\", got \"$sign\""))
    end
    return G
end
"""
Shunting equation for ARTSCENE Stage 3.

Computes the time rate of change of the oriented-filter field `y` from the
rectified ON (`X_plus`) and OFF (`X_minus`) LGN signals with passive decay
rate `alpha`.
"""
function ddt_y(y::RealArray, X_plus::RealArray, X_minus::RealArray, alpha::Real)
    # size(y) is 4-D; destructuring takes the first two (rows, columns).
    # (An earlier version read size(x) here, flagged as the "SOURCE OF WRONGNESS".)
    n_row, n_column = size(y)
    n_k = 4
    sigma_v = [0.25, 1, 2, 3]
    sigma_h = [0.75, 3, 6, 9]
    n_g = length(sigma_v)
    # Half-width of the truncated oriented kernel window
    kernel_r = 5
    # NOTE(review): dy is allocated as (n_row, n_column, n_k, n_g) but indexed
    # as [i, j, g, k] below; this only works because n_k == n_g == 4 here — confirm.
    dy = SharedArray{Float, 4}((n_row, n_column, n_k, n_g))
    @sync @distributed for k = 1:n_k
        for g = 1:n_g
            for i = 1:n_row
                for j = 1:n_column
                    # Clamp the kernel window to the field bounds
                    kernel_h = max(1, i-kernel_r):min(n_row, i + kernel_r)
                    kernel_w = max(1, j-kernel_r):min(n_row, j + kernel_r)
                    # Offset oriented kernels in the "plus" and "minus" directions
                    Gp = [oriented_kernel(i, j, p, q, k-1, sigma_h[g], sigma_v[g], sign="plus")
                          for p in kernel_h, q in kernel_w]
                    Gm = [oriented_kernel(i, j, p, q, k-1, sigma_h[g], sigma_v[g], sign="minus")
                          for p in kernel_h, q in kernel_w]
                    # Shunting dynamics: decay plus excitatory/inhibitory kernel responses
                    dy[i,j,g,k] = (-alpha*y[i,j,g,k]
                                   + (1-y[i,j,g,k])*sum(X_plus[kernel_h, kernel_w, g].*Gp
                                   + X_minus[kernel_h, kernel_w, g].*Gm)
                                   - (1+y[i,j,g,k])*sum(X_plus[kernel_h, kernel_w, g].*Gm
                                   + X_minus[kernel_h, kernel_w, g].*Gp))
                end
            end
        end
    end
    return dy
end
"""
ARTSCENE Stage 3: Contrast-sensitive oriented filtering.

Rectifies the contrast-normalized field `x` into ON/OFF channels and settles
the oriented shunting dynamics of [`ddt_y`](@ref).
"""
function contrast_sensitive_oriented_filtering(image::RealArray, x::RealArray)
    # Get the size of the field
    height, width = size(x)
    # Parameters
    n_scales = 4        # Number of scales
    n_orientations = 4  # Number of orientations
    decay = 1           # Passive decay rate
    n_settle = 4        # Number of settling iterations
    # Half-wave rectified LGN ON-cell and OFF-cell output signals
    X_plus = [max(0, x[i, j, g]) for i = 1:height, j = 1:width, g = 1:n_scales]
    X_minus = [max(0, -x[i, j, g]) for i = 1:height, j = 1:width, g = 1:n_scales]
    # Initialize each orientation channel with a copy of the input field
    y = zeros(height, width, n_scales, n_orientations)
    for k = 1:n_orientations
        y[:, :, :, k] .= x
    end
    # Settle the shunting dynamics
    for _ = 1:n_settle
        y += ddt_y(y, X_plus, X_minus, decay)
    end
    return y
end
"""
ARTSCENE Stage 4: Contrast-insensitive oriented filtering.

Discards contrast polarity by summing the half-wave rectified ON and OFF
responses of the oriented field (equivalent to the elementwise absolute value).
"""
function contrast_insensitive_oriented_filtering(y::RealArray)
    # Half-wave rectified ON-cell and OFF-cell output signals
    Y_plus = max.(0, y)
    Y_minus = max.(0, -y)
    # Polarity-insensitive response
    return Y_plus + Y_minus
end
"""
Competition kernel for ARTSCENE: Stage 5.

Gaussian competition between orientation indices `l` and `k`; the "plus"
(excitatory) kernel is narrower than the "minus" (inhibitory) one.

# Arguments
- `l::Integer`, `k::Integer`: the two competing orientation indices.
- `sign::AbstractString="plus"`: kernel selector, one of "plus" or "minus".

# Throws
- `ArgumentError`: if `sign` is neither "plus" nor "minus".
"""
function competition_kernel(l::Integer, k::Integer ; sign::AbstractString="plus")
    if sign == "plus"
        # Narrow excitatory Gaussian (width 0.5)
        g = ( 1/(0.5*sqrt(2*pi))*MathConstants.e^(-0.5*((l-k)/0.5)^2) )
    elseif sign == "minus"
        # Wide inhibitory Gaussian (width 1)
        g = ( 1/(sqrt(2*pi))*MathConstants.e^(-0.5*(l-k)^2) )
    else
        # Fix: throw a proper exception type instead of a raw string
        throw(ArgumentError("sign must be \"plus\" or \"minus\", got \"$sign\""))
    end
    return g
end
"""
Time rate of change for ARTSCENE: Stage 5.

Computes the shunting orientation-competition dynamics of the field `z`
with dimensions (n_row, n_column, n_g, n_k).
"""
function ddt_z(z::RealArray)
    n_row, n_column, n_g, n_k = size(z)
    # Allocate the derivative with the same dimension order as z:
    # (row, column, scale, orientation)
    # (fix: was allocated as (..., n_k, n_g) but indexed [i, j, g, k], which
    # only worked because n_k == n_g)
    dz = SharedArray{Float, 4}((n_row, n_column, n_g, n_k))
    @sync @distributed for k = 1:n_k
        for g = 1:n_g
            for i = 1:n_row
                for j = 1:n_column
                    # Excitatory and inhibitory competition across orientations l
                    # at the same position (fix: l runs over the orientation
                    # dimension n_k, not n_g)
                    zgp = sum([z[i,j,g,l]*competition_kernel(l,k,sign="plus") for l = 1:n_k])
                    zgm = sum([z[i,j,g,l]*competition_kernel(l,k,sign="minus") for l = 1:n_k])
                    # Shunting competition dynamics
                    dz[i,j,g,k] = (- z[i,j,g,k]
                                   + (1 - z[i,j,g,k]*zgp)
                                   - (1 + z[i,j,g,k]*zgm))
                end
            end
        end
    end
    return dz
end
"""
ARTSCENE Stage 5: Orientation competition at the same position.

Settles the orientation-competition dynamics of [`ddt_z`](@ref).
"""
function orientation_competition(z::RealArray)
    # Number of settling iterations for the competition dynamics
    n_settle = 4
    for _ = 1:n_settle
        z += ddt_z(z)
    end
    return z
end
"""
ARTSCENE Stage 6: Create patch feature vectors.

Divides the field into a patch grid and averages the orientation activity `z`
and the raw image color channels within each patch.

# Arguments
- `z::RealArray`: the (n_i, n_j, n_g, n_k) settled orientation field.
- `image::RealArray`: the raw channel-first (color, n_i, n_j) image.

# Returns
- `O`: (patch_i, patch_j, n_g, n_k) patch-averaged orientation features.
- `C`: (patch_i, patch_j, n_colors) patch-averaged color features.
"""
function patch_orientation_color(z::RealArray, image::RealArray)
    n_i, n_j, n_g, n_k = size(z)
    # Patch grid dimensions
    patch_i = 4
    patch_j = 4
    n_colors = 3
    # Patch extents (may be fractional; index ranges are floored below)
    size_i = n_i / patch_i
    size_j = n_j / patch_j
    size_patch = size_i * size_j
    O = zeros(patch_i, patch_j, n_g, n_k)
    C = zeros(patch_i, patch_j, n_colors)
    for p_i = 1:patch_i
        for p_j = 1:patch_j
            # Index ranges of this patch within the full field
            i_range = Int(floor(size_i*(p_i-1)+1)):Int(floor(size_i*p_i))
            j_range = Int(floor(size_j*(p_j-1)+1)):Int(floor(size_j*p_j))
            # Patch-averaged color channels
            for c = 1:n_colors
                C[p_i,p_j,c] = 1/size_patch*sum(image[c, i_range, j_range])
            end
            # Patch-averaged orientation activity for every (scale, orientation)
            # pair (fix: loops use n_g/n_k instead of hardcoded 4, indexed in
            # z's dimension order)
            for k = 1:n_k
                for g = 1:n_g
                    O[p_i, p_j, g, k] = 1/size_patch * sum(z[i_range, j_range, g, k])
                end
            end
        end
    end
    return O, C
end
"""
Process the full artscene filter toolchain on an image.

# Arguments
- `raw_image::Array{Real, 3}`: the raw RGB image to process with the ARTSCENE filter.

# Returns
- `O`: patch-averaged orientation features (see `patch_orientation_color`).
- `C`: patch-averaged color features.
"""
function artscene_filter(raw_image::Array{T, 3}) where T <: RealFP
    # Get the number of workers for the distributed stages
    n_processes = nprocs()
    n_workers = nworkers()
    @debug "ARTSCENE - Parallel processes: $n_processes, Workers: $n_workers"
    # Log the dimensions and type of the input image
    image_size = size(raw_image)
    image_type = typeof(raw_image)
    @debug "ARTSCENE - Original: Size = $image_size, Type = $image_type"
    # Stage 1: Grayscale
    image = color_to_gray(raw_image)
    image_size = size(image)
    image_type = typeof(image)
    @debug "ARTSCENE - Stage 1 Complete: Grayscale: Size = $image_size, Type = $image_type"
    # Stage 2: Contrast normalization
    x = contrast_normalization(image)
    image_size = size(x)
    image_type = typeof(x)
    @debug "ARTSCENE - Stage 2 Complete: Contrast: Size = $image_size, Type = $image_type"
    # Stage 3: Contrast-sensitive oriented filtering
    y = contrast_sensitive_oriented_filtering(image, x)
    image_size = size(y)
    image_type = typeof(y)
    @debug "ARTSCENE - Stage 3 Complete: Sensitive Oriented: Size = $image_size, Type = $image_type"
    # Stage 4: Contrast-insensitive oriented filtering
    z = contrast_insensitive_oriented_filtering(y)
    image_size = size(z)
    image_type = typeof(z)
    @debug "ARTSCENE - Stage 4 Complete: Insensitive Oriented: Size = $image_size, Type = $image_type"
    # Stage 5: Orientation competition
    z = orientation_competition(z)
    image_size = size(z)
    image_type = typeof(z)
    @debug "ARTSCENE - Stage 5 Complete: Orientation Competition: Size = $image_size, Type = $image_type"
    # *Stage 6*: Compute patch vectors (orientation and color)
    O, C = patch_orientation_color(z, raw_image)
    @debug "ARTSCENE - Stage 6 Complete"
    return O, C
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 3595 | """
FAM.jl
# Description:
Options, structures, and logic for the Fuzzy ARTMAP (FAM) module.
# References:
1. G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, βFuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,β IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059.
"""
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
Implements a Fuzzy ARTMAP learner's options.

$(_OPTS_DOCSTRING)
"""
@with_kw mutable struct opts_FAM <: ARTOpts @deftype Float
    """
    Vigilance parameter: rho ∈ [0, 1].
    """
    rho = 0.6; @assert rho >= 0.0 && rho <= 1.0

    """
    Choice parameter: alpha > 0.
    """
    alpha = 1e-7; @assert alpha > 0.0

    """
    Match tracking parameter: epsilon ∈ (0, 1).
    """
    epsilon = 1e-3; @assert epsilon > 0.0 && epsilon < 1.0

    """
    Learning parameter: beta ∈ (0, 1].
    """
    beta = 1.0; @assert beta > 0.0 && beta <= 1.0

    """
    Maximum number of epochs during training: max_epochs ∈ [1, Inf).
    """
    max_epochs::Int = 1

    """
    Uncommitted node flag.
    """
    uncommitted::Bool = true

    """
    Display flag for progress bars.
    """
    display::Bool = false

    """
    Flag to sort the F2 nodes by activation before the match phase.

    When true, the F2 nodes are sorted by activation before match.
    When false, an iterative argmax and inhibition procedure is used to find the best-matching unit.
    """
    sort::Bool = false
end
"""
Fuzzy ARTMAP struct.

For module options, see [`AdaptiveResonance.opts_FAM`](@ref).

# References
1. G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, "Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps," IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059.
"""
mutable struct FAM <: ARTMAP
    """
    Fuzzy ARTMAP options struct.
    """
    opts::opts_FAM

    """
    Data configuration struct.
    """
    config::DataConfig

    """
    Category weight matrix.
    """
    W::ARTMatrix{Float}

    """
    Incremental list of labels corresponding to each F2 node, self-prescribed or supervised.
    """
    labels::ARTVector{Int}

    """
    Number of category weights (F2 nodes).
    """
    n_categories::Int

    """
    Current training epoch.
    """
    epoch::Int
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
"""
Implements a Fuzzy ARTMAP learner with optional keyword arguments.

# Examples
By default:
```julia-repl
julia> FAM()
FAM
    opts: opts_FAM
    ...
```
or with keyword arguments:
```julia-repl
julia> FAM(rho=0.7)
FAM
    opts: opts_FAM
    ...
```
"""
function FAM(;kwargs...)
    # Build the options from the keyword arguments, then the module from the options
    return FAM(opts_FAM(;kwargs...))
end
"""
Implements a Fuzzy ARTMAP learner with specified options.

# Examples
```julia-repl
julia> opts = opts_FAM()
julia> FAM(opts)
FAM
    opts: opts_FAM
    ...
```
"""
function FAM(opts::opts_FAM)
    # Construct the module with empty dynamic fields; categories and weights
    # are created incrementally during training.
    FAM(
        opts,                           # opts_FAM
        DataConfig(),                   # config
        ARTMatrix{Float}(undef, 0, 0),  # W
        ARTVector{Int}(undef, 0),       # labels
        0,                              # n_categories
        0                               # epoch
    )
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 10484 | """
SFAM.jl
# Description:
Options, structures, and logic for the Simplified Fuzzy ARTMAP (SFAM) module.
# References:
1. G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, βFuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps,β IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059.
"""
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
Implements a Simple Fuzzy ARTMAP learner's options.

$(_OPTS_DOCSTRING)
"""
@with_kw mutable struct opts_SFAM <: ARTOpts @deftype Float
    """
    Vigilance parameter: rho ∈ [0, 1].
    """
    rho = 0.75; @assert rho >= 0.0 && rho <= 1.0

    """
    Choice parameter: alpha > 0.
    """
    alpha = 1e-7; @assert alpha > 0.0

    """
    Match tracking parameter: epsilon ∈ (0, 1).
    """
    epsilon = 1e-3; @assert epsilon > 0.0 && epsilon < 1.0

    """
    Learning parameter: beta ∈ (0, 1].
    """
    beta = 1.0; @assert beta > 0.0 && beta <= 1.0

    """
    Maximum number of epochs during training: max_epoch ∈ [1, Inf).
    """
    max_epoch::Int = 1

    """
    Display flag for progress bars.
    """
    display::Bool = false

    """
    Flag to use an uncommitted node when learning.

    If true, new weights are created with ones(dim) and learn on the complement-coded sample.
    If false, fast-committing is used where the new weight is simply the complement-coded sample.
    """
    uncommitted::Bool = false

    """
    Selected match function.
    """
    match::Symbol = :basic_match

    """
    Selected activation function.
    """
    activation::Symbol = :basic_activation

    """
    Selected weight update function.
    """
    update::Symbol = :basic_update

    """
    Flag to sort the F2 nodes by activation before the match phase.

    When true, the F2 nodes are sorted by activation before match.
    When false, an iterative argmax and inhibition procedure is used to find the best-matching unit.
    """
    sort::Bool = false
end
"""
Simple Fuzzy ARTMAP struct.

For module options, see [`AdaptiveResonance.opts_SFAM`](@ref).

# References
1. G. A. Carpenter, S. Grossberg, N. Markuzon, J. H. Reynolds, and D. B. Rosen, "Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps," IEEE Trans. Neural Networks, vol. 3, no. 5, pp. 698-713, 1992, doi: 10.1109/72.159059.
"""
mutable struct SFAM <: ARTMAP
    """
    Simplified Fuzzy ARTMAP options struct.
    """
    opts::opts_SFAM

    """
    Data configuration struct.
    """
    config::DataConfig

    """
    Category weight matrix.
    """
    W::ARTMatrix{Float}

    """
    Incremental list of labels corresponding to each F2 node, self-prescribed or supervised.
    """
    labels::ARTVector{Int}

    """
    Number of category weights (F2 nodes).
    """
    n_categories::Int

    """
    Current training epoch.
    """
    epoch::Int

    """
    Activation values for each F2 node for the current sample.
    """
    T::ARTVector{Float}

    """
    Match values for each F2 node for the current sample.
    """
    M::ARTVector{Float}

    """
    Runtime statistics for the module, implemented as a dictionary containing entries at the end of each training iteration.
    These entries include the best-matching unit index and the activation and match values of the winning node.
    """
    stats::ARTStats
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
"""
Implements a Simple Fuzzy ARTMAP learner with optional keyword arguments.

# Arguments
- `kwargs`: keyword arguments to pass to the Simple Fuzzy ARTMAP options struct (see [`AdaptiveResonance.opts_SFAM`](@ref).)

# Examples
By default:
```julia-repl
julia> SFAM()
SFAM
    opts: opts_SFAM
    ...
```
or with keyword arguments:
```julia-repl
julia> SFAM(rho=0.6)
SFAM
    opts: opts_SFAM
    ...
```
"""
function SFAM(;kwargs...)
    # Build the options from the keyword arguments, then the module from the options
    return SFAM(opts_SFAM(;kwargs...))
end
"""
Implements a Simple Fuzzy ARTMAP learner with specified options.

# Arguments
- `opts::opts_SFAM`: the Simple Fuzzy ARTMAP options (see [`AdaptiveResonance.opts_SFAM`](@ref)).

# Examples
```julia-repl
julia> opts = opts_SFAM()
julia> SFAM(opts)
SFAM
    opts: opts_SFAM
    ...
```
"""
function SFAM(opts::opts_SFAM)
    # Construct the module with empty dynamic fields; categories and weights
    # are created incrementally during training.
    SFAM(
        opts,                           # opts_SFAM
        DataConfig(),                   # config
        ARTMatrix{Float}(undef, 0, 0),  # W
        ARTVector{Int}(undef, 0),       # labels
        0,                              # n_categories
        0,                              # epoch
        ARTVector{Float}(undef, 0),     # T
        ARTVector{Float}(undef, 0),     # M
        build_art_stats(),              # stats
    )
end
# -----------------------------------------------------------------------------
# ALGORITHMIC METHODS
# -----------------------------------------------------------------------------
# COMMON DOC: SFAM initialization
function initialize!(art::SFAM, x::RealVector, y::Integer)
    # Initialize the weight matrix as empty with the complement-coded feature dimension
    art.W = ARTMatrix{Float}(undef, art.config.dim_comp, 0)
    # Create a new category from the sample
    create_category!(art, x, y)
end
# COMMON DOC: SFAM category creation
function create_category!(art::SFAM, x::RealVector, y::Integer)
    # Increment the number of categories
    art.n_categories += 1
    # If we use an uncommitted node
    if art.opts.uncommitted
        # Add a new weight column of ones
        append!(art.W, ones(art.config.dim_comp, 1))
        # Learn the uncommitted node on the sample
        learn!(art, x, art.n_categories)
    else
        # Fast commit: the new weight is the (complement-coded) sample itself
        append!(art.W, x)
    end
    # NOTE(review): unlike FuzzyART, SFAM does not track per-category instance
    # counts (no n_instance field).
    # Add the label for the category
    push!(art.labels, y)
end
# SFAM incremental training method (docstring attached via the common @doc statement for train!)
function train!(art::SFAM, x::RealVector, y::Integer ; preprocessed::Bool=false)
    # Run the sequential initialization procedure (complement codes the sample if needed)
    sample = init_train!(x, art, preprocessed)
    # Initialization: the first-ever sample simply becomes the first category
    if isempty(art.W)
        initialize!(art, sample, y)
        return y
    end
    # If we don't have the label, create a new category immediately
    if !(y in art.labels)
        create_category!(art, sample, y)
    # Otherwise, test for a match
    else
        # Baseline vigilance parameter; raised during match tracking below
        rho_baseline = art.opts.rho
        # Compute the activation for all categories
        accommodate_vector!(art.T, art.n_categories)
        for jx in 1:art.n_categories
            art.T[jx] = art_activation(art, sample, jx)
        end
        # Sort activation function values in descending order
        if art.opts.sort
            index = sortperm(art.T, rev=true)
            top_bmu = index[1]
        else
            top_bmu = argmax(art.T)
        end
        mismatch_flag = true
        accommodate_vector!(art.M, art.n_categories)
        # NOTE: `bmu` is a function-level local (it is also assigned after this loop),
        # so the value set inside the loop survives the `break` below.
        for jx in 1:art.n_categories
            # Set the best-matching-unit index
            if art.opts.sort
                bmu = index[jx]
            else
                # Unsorted mode: repeatedly take the current argmax; losers are zeroed below
                bmu = argmax(art.T)
            end
            # Compute match function
            art.M[bmu] = art_match(art, sample, bmu)
            # Current winner: resonance when the match meets the (possibly raised) vigilance
            if art.M[bmu] >= rho_baseline
                if y == art.labels[bmu]
                    # Correct label: update the weight and break
                    learn!(art, sample, bmu)
                    mismatch_flag = false
                    break
                else
                    # Match tracking: raise the vigilance just above this match value
                    rho_baseline = art.M[bmu] + art.opts.epsilon
                end
            elseif !art.opts.sort
                # Remove the top activation so the next argmax finds the next-best unit
                art.T[bmu] = 0.0
            end
        end
        # If we triggered a mismatch (no category resonated with the correct label)
        if mismatch_flag
            # Keep the bmu as the top activation despite creating a new category
            bmu = top_bmu
            # Create new weight vector
            create_category!(art, sample, y)
        end
        # Update the stored match and activation values
        log_art_stats!(art, bmu, mismatch_flag)
    end
    # ARTMAP guarantees correct training classification, so just return the label
    return y
end
# SFAM incremental classification method (docstring attached via the common @doc statement for classify)
function classify(art::SFAM, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false)
    # Run the sequential initialization procedure (complement codes the sample if needed)
    sample = init_classify!(x, art, preprocessed)
    # Compute the activation for all categories
    accommodate_vector!(art.T, art.n_categories)
    for jx in 1:art.n_categories
        art.T[jx] = art_activation(art, sample, jx)
    end
    # Sort activation function values in descending order
    if art.opts.sort
        index = sortperm(art.T, rev=true)
        top_bmu = index[1]
    else
        top_bmu = argmax(art.T)
    end
    # Default to mismatch
    mismatch_flag = true
    y_hat = -1
    # Iterate over the list of activations, best first
    accommodate_vector!(art.M, art.n_categories)
    # NOTE: `bmu` is a function-level local (it is also assigned after this loop),
    # so the value set inside the loop survives the `break` below.
    for jx in 1:art.n_categories
        # Set the best-matching-unit index
        if art.opts.sort
            bmu = index[jx]
        else
            # Unsorted mode: repeatedly take the current argmax; losers are zeroed below
            bmu = argmax(art.T)
        end
        # Compute match function
        art.M[bmu] = art_match(art, sample, bmu)
        # Current winner: the first category whose match meets the vigilance wins
        if art.M[bmu] >= art.opts.rho
            y_hat = art.labels[bmu]
            mismatch_flag = false
            break
        elseif !art.opts.sort
            # Remove the top activation so the next argmax finds the next-best unit
            art.T[bmu] = 0.0
        end
    end
    # If we did not find a resonant category
    if mismatch_flag
        # Keep the bmu as the top activation
        bmu = top_bmu
        # Report either the best matching unit or the mismatch label -1
        y_hat = get_bmu ? art.labels[bmu] : -1
    end
    # Update the stored match and activation values
    log_art_stats!(art, bmu, mismatch_flag)
    return y_hat
end
"""
In-place learning function.
"""
function learn!(art::SFAM, x::RealVector, index::Integer)
# Compute the updated weight W
new_vec = art_learn(art, x, index)
# Replace the weight in place
replace_mat_index!(art.W, new_vec, index)
# Return empty
return
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 2976 | """
common.jl
# Description:
Includes all of the unsupervised ARTMAP modules common code.
"""
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
"""
train!(art::ARTMAP, x::RealMatrix, y::IntegerVector, preprocessed::Bool=false)
Train the ARTMAP model on a batch of data 'x' with supervisory labels 'y.'
# Arguments
- `art::ARTMAP`: the supervised ARTMAP model to train.
- `x::RealMatrix`: the 2-D dataset containing columns of samples with rows of features.
- `y::IntegerVector`: labels for supervisory training.
- `preprocessed::Bool=false`: flag, if the data has already been complement coded or not.
"""
function train!(art::ARTMAP, x::RealMatrix, y::IntegerVector, preprocessed::Bool=false)
# Show a message if display is on
art.opts.display && @info "Training $(typeof(art))"
# Data information and setup
n_samples = length(y)
# Run the batch initialization procedure
x = init_train!(x, art, preprocessed)
# Initialize the output vector
y_hat = zeros(Int, n_samples)
# Learn until the stopping conditions
art.epoch = 0
while true
# Increment the epoch and get the iterator
art.epoch += 1
iter = get_iterator(art.opts, n_samples)
for i = iter
# Update the iterator if necessary
update_iter(art, iter, i)
# Grab the sample slice
sample = get_sample(x, i)
label = y[i]
# Train upon the sample and label
y_hat[i] = train!(art, sample, label, preprocessed=true)
end
# Check stopping conditions
if stopping_conditions(art)
break
end
end
return y_hat
end
# -----------------------------------------------------------------------------
# COMMON DOCUMENTATION
# -----------------------------------------------------------------------------
@doc raw"""
Train the supervised ARTMAP model on a single sample of features 'x' with supervisory label 'y'.
# Arguments
- `art::ARTMAP`: the supervised ART model to train.
- `x::RealVector`: the single sample feature vector to train upon.
- `y::Integer`: the label for supervisory training.
- `preprocessed::Bool=false`: optional, flag if the data has already been complement coded or not.
"""
train!(art::ARTMAP, x::RealVector, y::Integer ; preprocessed::Bool=false)
@doc raw"""
Initializes the supervised ARTMAP module for training with sample 'x' and label 'y', setting up the data configuration and instantiating the first category.
# Arguments
- `art::ARTMAP`: the ARTMAP module to initialize.
- `x::RealVector`: the sample to use for initialization.
- `y::Integer`: the initial supervised label.
# Examples
```julia-repl
julia> my_sfam = SFAM()
SFAM
opts: opts_SFAM
...
julia> initialize!(my_SFAM, [1, 2, 3, 4])
"""
initialize!(art::ARTMAP, x::RealVector, y::Integer)
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1637 | """
variants.jl
# Description
Includes convenience constructors for common variants of various ARTMAP modules.
"""
# -----------------------------------------------------------------------------
# DEFAULT ARTMAP
# -----------------------------------------------------------------------------
# Shared variant statement for Default ARTMAP
const _VARIANT_STATEMENT_DAM = """
Default ARTMAP is a variant of SFAM, using the [`AdaptiveResonance.opts_SFAM`](@ref) options.
This constructor sets the activation to `:choice_by_difference` in addition to the keyword argument options you provide.
"""
"""
Constructs a Default ARTMAP module using a SFAM module using Default ARTMAP's choice-by-difference activation function.
$(_VARIANT_STATEMENT_DAM)
# Arguments
- `kwargs`: keyword arguments of Simplified FuzzyARTMAP options (see [`AdaptiveResonance.opts_SFAM`](@ref))
# References:
1. G. P. Amis and G. A. Carpenter, 'Default ARTMAP 2,' IEEE Int. Conf. Neural Networks - Conf. Proc., vol. 2, no. September 2007, pp. 777-782, Mar. 2007, doi: 10.1109/IJCNN.2007.4371056.
"""
function DAM(;kwargs...)
return SFAM(;activation=:choice_by_difference, kwargs...)
end
"""
Implements a Default ARTMAP module with specified options.
$(_VARIANT_STATEMENT_DAM)
# Arguments
- `opts::opts_SFAM`: the Simplified FuzzyARTMAP options (see [`AdaptiveResonance.opts_SFAM`](@ref)).
"""
function DAM(opts::opts_SFAM)
return SFAM(opts)
end
"""
Implements a Default ARTMAP module's options.
$(_VARIANT_STATEMENT_DAM)
$(_OPTS_DOCSTRING)
"""
function opts_DAM(;kwargs...)
return opts_SFAM(;activation=:choice_by_difference, kwargs...)
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 18968 | """
common.jl
# Description
Common algorithmic types and functions used throughout the package.
"""
# -----------------------------------------------------------------------------
# TYPE ALIASES
# -----------------------------------------------------------------------------
"""
Definition of the ART module statistics dictionary, used to generate and store various logs during training and testing.
"""
const ARTStats = Dict{String, Any}
# -----------------------------------------------------------------------------
# TYPES
# -----------------------------------------------------------------------------
"""
Container to standardize training/testing data configuration.
This container declares if a data configuration has been setup, what the original and complement coded dimensions are, and what the minimums and maximums of the values along each feature dimension are.
"""
mutable struct DataConfig
"""
Flag if data has been setup yet or not.
"""
setup::Bool
"""
List of minimum values for each feature.
"""
mins::Vector{Float}
"""
List of maximum values for each feature.
"""
maxs::Vector{Float}
"""
Dimensionality of the feature vectors (i.e., number of features).
"""
dim::Int
"""
Complement coded feature dimensionality, twice the size of `dim`.
"""
dim_comp::Int
end
# -----------------------------------------------------------------------------
# CONSTRUCTORS
# -----------------------------------------------------------------------------
"""
Default constructor for a data configuration, not set up.
"""
function DataConfig()
DataConfig(
false, # setup
Array{Float}(undef, 0), # min
Array{Float}(undef, 0), # max
0, # dim
0 # dim_comp
)
end
"""
Convenience constructor for DataConfig, requiring only mins and maxs of the features.
This constructor is used when the mins and maxs differ across features. The dimension is inferred by the length of the mins and maxs.
# Arguments
- `mins::RealVector`: a vector of minimum values for each feature dimension.
- `maxs::RealVector`: a vector of maximum values for each feature dimension.
"""
function DataConfig(mins::RealVector, maxs::RealVector)
# Verify that the mins and maxs are the same length
length(mins) != length(maxs) && error("Mins and maxs must be the same length.")
# Get the dimension from one of the arrays
dim = length(mins)
# Initialize a Dataconfig with the explicit config
DataConfig(
true, # setup
mins, # min
maxs, # max
dim, # dim
dim * 2 # dim_comp
)
end
"""
Convenience constructor for DataConfig, requiring only a global min, max, and dim.
This constructor is used in the case that the feature mins and maxs are all the same respectively.
# Arguments
- `min::Real`: the minimum value across all features.
- `max::Real`: the maximum value across all features.
- `dim::Integer`: the dimension of the features, which must be provided because it cannot be inferred from just the minimum or maximum values.
"""
function DataConfig(min::Real, max::Real, dim::Integer)
DataConfig(
true, # setup
repeat([min], dim), # min
repeat([max], dim), # max
dim, # dim
dim * 2 # dim_comp
)
end
"""
Convenience constructor for DataConfig, requiring only the data matrix.
# Arguments
- `data::RealMatrix`: the 2-D batch of data to be used for inferring the data configuration.
"""
function DataConfig(data::RealMatrix)
# Create an empty dataconfig
config = DataConfig()
# Runthe setup upon the config using the data matrix for reference
data_setup!(config, data)
# Return the constructed DataConfig
return config
end
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
"""
Initializes an ARTStats dictionary with zero entries.
"""
function build_art_stats()
# Create the stats dictionary
stats = ARTStats()
# Initialize zero entries for each element
stats["M"] = 0.0
stats["T"] = 0.0
stats["bmu"] = 0
stats["mismatch"] = false
# Return the zero-initialized stats dictionary
return stats
end
"""
Logs common statistics of an ART module after a training/classification iteration.
# Arguments
- `art::ARTModule`: the ART module that just underwent training/classification.
- `bmu::Integer`: the best-matching unit integer index.
- `mismatch::Bool`: flag of whether there was a mismatch in this iteration.
"""
function log_art_stats!(art::ARTModule, bmu::Integer, mismatch::Bool)
# Overwrite the stats entries
art.stats["M"] = art.M[bmu]
art.stats["T"] = art.T[bmu]
art.stats["bmu"] = bmu
art.stats["mismatch"] = mismatch
# Return empty
return
end
"""
Returns the element-wise minimum between sample x and weight W.
# Arguments
- `x::RealVector`: the input sample.
- `W::RealVector`: the weight vector to compare the sample against.
"""
function element_min(x::RealVector, W::RealVector)
# Get the length of the sample
n_el = length(x)
# Create a destination in memory of zeros of type and size like the sample
min_vec = zero(x)
# Iterate over every element of the sample
for ix = 1:n_el
# Get and assign the minimum of the sample and weight at index ix
@inbounds min_vec[ix] = min(x[ix], W[ix])
end
# Return the element-minimum vector
return min_vec
# return @inbounds vec(minimum([x W], dims = 2))
end
"""
Convenience function to get the categorization performance of y_hat against y.
# Arguments
- `y_hat::IntegerVector`: the estimated labels.
- `y::IntegerVector`: the true labels.
"""
function performance(y_hat::IntegerVector, y::IntegerVector)
# Get the number of labels
n_y = length(y)
# Check lengths
if length(y_hat) != n_y
error("Label vectors must be the same length")
end
# Get the number of correct classifications
n_correct = 0
for ix = 1:n_y
if y_hat[ix] == y[ix]
n_correct += 1
end
end
# Return the performance as the number correct over the total number
return n_correct/n_y
end
"""
Returns the dimension of the data, enforcint the (dim, n_samples) convention of the package.
# Arguments
- `data::RealMatrix`: the 2-D data to infer the feature dimension of.
"""
function get_dim(data::RealMatrix)
# Return the correct dimension of the data
return size(data)[ART_DIM]
end
"""
Returns the number of samples, enforcing the convention of the package.
# Arguments
- `data::RealMatrix`: the 2-D data to infer the number of samples from.
"""
function get_n_samples(data::RealMatrix)
# Return the correct number of samples
return size(data)[ART_SAMPLES]
end
"""
Returns the (dim, n_samples) of the provided 2-D data matrix, enforcing the ART package convention.
# Arguments
- `data::RealMatrix`: the 2-D data to infer the feature dimension and number of samples from.
"""
function get_data_shape(data::RealMatrix)
# Get the dimension of the data
dim = get_dim(data)
# Get the number of samples of the data
n_samples = get_n_samples(data)
# Return the dimension and number of samples
return dim, n_samples
end
"""
Sets up the data config for the ART module before training.
This function crucially gets the original and complement-coded dimensions of the data, and it infers the bounds of the data (minimums and maximums) by the largest and smallest values along each feature dimension.
# Arguments
- `config::DataConfig`: the ART/ARTMAP module's data configuration object.
- `data::RealMatrix`: the 2-D batch of data to use for creating the data configuration.
"""
function data_setup!(config::DataConfig, data::RealMatrix)
if config.setup
@warn "Data configuration already set up, overwriting config"
else
config.setup = true
end
# Get the correct dimensionality and number of samples
config.dim = get_dim(data)
config.dim_comp = 2 * config.dim
# Compute the ranges of each feature
config.mins = zeros(config.dim)
config.maxs = zeros(config.dim)
for ix = 1:config.dim
config.mins[ix] = minimum(data[ix, :])
config.maxs[ix] = maximum(data[ix, :])
end
end
"""
Convenience method for setting up the DataConfig of an ART module in advance.
# Arguments
- `art::ARTModule`: the ART/ARTMAP module to manually configure the data config for.
- `data::RealArray`: the 2-D batch of data used to create the data config.
"""
function data_setup!(art::ARTModule, data::RealMatrix)
# Modify the DataConfig of the ART module directly
data_setup!(art.config, data)
end
"""
Get the characteristics of the data, taking account if a data config is passed.
If no DataConfig is passed, then the data characteristics come from the array itself.
Otherwise, use the config for the statistics of the data and the data array for the number of samples.
# Arguments
- `data::RealMatrix`: the 2-D data to be complement coded.
- `config::DataConfig=DataConfig()`: the data configuration for the ART/ARTMAP module.
"""
function get_data_characteristics(data::RealMatrix ; config::DataConfig=DataConfig())
# If the data is setup, use the config
if config.setup
# Just get the number of samples and use the config for everything else
n_samples = get_n_samples(data)
dim = config.dim
mins = config.mins
maxs = config.maxs
else
# Get the feature dimension and number of samples
dim, n_samples = get_data_shape(data)
# Get the ranges for each feature
mins = zeros(dim)
maxs = zeros(dim)
for ix = 1:dim
mins[ix] = minimum(data[ix, :])
maxs[ix] = maximum(data[ix, :])
end
end
return dim, n_samples, mins, maxs
end
"""
Normalize the data to the range [0, 1] along each feature.
# Arguments
- `data::RealVector`: the 1-D sample of data to normalize.
- `config::DataConfig=DataConfig()`: the data configuration from the ART/ARTMAP module.
"""
function linear_normalization(data::RealVector ; config::DataConfig=DataConfig())
# Vector normalization requires a setup DataConfig
if !config.setup
error("Attempting to complement code a vector without a setup DataConfig")
end
# Populate a new array with normalized values.
x_raw = zeros(config.dim)
# Iterate over each dimension
for i = 1:config.dim
denominator = config.maxs[i] - config.mins[i]
if denominator != 0
# If the denominator is not zero, normalize
x_raw[i] = (data[i] .- config.mins[i]) ./ denominator
else
# Otherwise, the feature is zeroed because it contains no useful information
x_raw[i] = zero(Int)
end
end
return x_raw
end
"""
Normalize the data to the range [0, 1] along each feature.
# Arguments
- `data::RealMatrix`: the 2-D batch of data to normalize.
- `config::DataConfig=DataConfig()`: the data configuration from the ART/ARTMAP module.
"""
function linear_normalization(data::RealMatrix ; config::DataConfig=DataConfig())
# Get the data characteristics
dim, n_samples, mins, maxs = get_data_characteristics(data, config=config)
# Populate a new array with normalized values.
x_raw = zeros(dim, n_samples)
# Verify that all maxs are strictly greater than mins
if !all(mins .< maxs)
error("Got a data max index that is smaller than the corresonding min")
end
# Iterate over each dimension
for i = 1:dim
denominator = maxs[i] - mins[i]
if denominator != 0
# If the denominator is not zero, normalize
x_raw[i, :] = (data[i, :] .- mins[i]) ./ denominator
else
# Otherwise, the feature is zeroed because it contains no useful information
x_raw[i, :] = zeros(length(x_raw[i, :]))
end
end
return x_raw
end
"""
Normalizes the data x to [0, 1] and returns the augmented vector [x, 1 - x].
# Arguments
- `data::RealArray`: the 1-D or 2-D data to be complement coded.
- `config::DataConfig=DataConfig()`: the data configuration for the ART/ARTMAP module.
"""
function complement_code(data::RealArray ; config::DataConfig=DataConfig())
# Normalize the data
x_raw = linear_normalization(data, config=config)
# Complement code the data and return a concatenated matrix
return vcat(x_raw, 1 .- x_raw)
end
"""
Creates an iterator object according to the ART/ARTMAP modules display settings for batch iteration.
# Arguments
- `opts::ARTOpts`: the ART/ARTMAP module's options containing display settings.
- `n_samples::Integer`: the number of iterations to create the iterator for.
"""
function get_iterator(opts::ARTOpts, n_samples::Integer)
# Construct the iterator
iter_raw = 1:n_samples
# If we want a progress bar, construct one. Otherwise, return the raw iterator
iter = opts.display ? ProgressBar(iter_raw) : iter_raw
# Return the selected iterator
return iter
end
"""
Updates the iteration of the ART/ARTMAP module, training or inference, according to its display settings.
# Arguments
- `art::ARTModule`: the ART/ARTMAP module being iterated upon.
- `iter::ARTIterator`: the iterator object used in the training/inference loop.
- `i::Integer`: the iteration during training/inference that the iterator should be updated to.
"""
function update_iter(art::ARTModule, iter::ARTIterator, i::Integer)
# Check explicitly for each, as the function definition restricts the types
if iter isa ProgressBar
set_description(iter, "Ep: $(art.epoch), ID: $(i), Cat: $(art.n_categories)")
elseif iter isa UnitRange
return
end
end
"""
Returns a sample from data array `x` at sample location `i`.
This function implements the convention that columns are samples while rows are features within samples.
# Arguments
- `x::RealMatrix`: the batch of data to grab a sample from.
- `i::Integer`: the index to get the sample from.
"""
function get_sample(x::RealMatrix, i::Integer)
# Return the sample at location
# return x[:, i]
return @inbounds x[:, i]
end
"""
Initializes the module for training in a single iteration.
The purpose of this function is mainly to handle the conditions of complement coding.
Fails if the module was incorrectly set up or if the module was not setup and the data was not preprocessed.
# Arguments
- `x::RealVector`: the sample used for initialization.
- `art::ARTModule`: the ART/ARTMAP module that will be trained on the sample.
- `preprocessed::Bool`: a required flag for if the sample has already been complement coded and normalized.
"""
function init_train!(x::RealVector, art::ARTModule, preprocessed::Bool)
# If the data is not preprocessed
if !preprocessed
# If the data config is not setup, not enough information to preprocess
if !art.config.setup
error("$(typeof(art)): cannot preprocess data before being setup.")
end
x = complement_code(x, config=art.config)
# If it is preprocessed and we are not setup
elseif !art.config.setup
# Get the dimension of the vector
dim_comp = length(x)
# If the complemented dimension is not even, error
if !iseven(dim_comp)
error("Declared that the vector is preprocessed, but it is not even")
end
# Half the complemented dimension and setup the DataConfig with that
dim = Int(dim_comp/2)
art.config = DataConfig(0, 1, dim)
end
return x
end
"""
Initializes the training loop for batch learning.
# Arguments
- `x::RealMatrix`: the data that is used for training.
- `art::ARTModule`: the ART/ARTMAP that will be trained.
- `preprocessed::Bool`: required flag for if the data has already been complement coded and normalized.
"""
function init_train!(x::RealMatrix, art::ARTModule, preprocessed::Bool)
# If the data is not preprocessed, then complement code it
if !preprocessed
# Set up the data config if training for the first time
!art.config.setup && data_setup!(art.config, x)
x = complement_code(x, config=art.config)
end
return x
end
"""
Initializes the classification loop for batch inference.
# Arguments
- `x::RealArray`: the data that is used for inference.
- `art::ARTModule`: the ART/ARTMAP module that will be used for inference.
- `preprocessed::Bool`: required flag for if the data has already been complement coded and normalized.
"""
function init_classify!(x::RealArray, art::ARTModule, preprocessed::Bool)
# If the data is not preprocessed
if !preprocessed
# If the data config is not setup, not enough information to preprocess
if !art.config.setup
error("$(typeof(art)): cannot preprocess data before being setup.")
end
# Dispatch to the correct complement code method (vector or matrix)
x = complement_code(x, config=art.config)
end
return x
end
"""
Predict categories of 'x' using the ART model.
Returns predicted categories 'y_hat.'
# Arguments
- `art::ARTModule`: ART or ARTMAP module to use for batch inference.
- `x::RealMatrix`: the 2-D dataset containing columns of samples with rows of features.
- `preprocessed::Bool=false`: flag, if the data has already been complement coded or not.
- `get_bmu::Bool=false`, flag, if the model should return the best-matching-unit label in the case of total mismatch.
# Examples
```julia-repl
julia> my_DDVFA = DDVFA()
DDVFA
opts: opts_DDVFA
...
julia> x, y = load_data()
julia> train!(my_DDVFA, x)
julia> y_hat = classify(my_DDVFA, y)
```
"""
function classify(art::ARTModule, x::RealMatrix ; preprocessed::Bool=false, get_bmu::Bool=false)
# Show a message if display is on
art.opts.display && @info "Testing $(typeof(art))"
# Preprocess the data
x = init_classify!(x, art, preprocessed)
# Data information and setup
n_samples = get_n_samples(x)
# Initialize the output vector
y_hat = zeros(Int, n_samples)
# Get the iterator based on the module options and data shape
iter = get_iterator(art.opts, n_samples)
for ix = iter
# Update the iterator if necessary
update_iter(art, iter, ix)
# Grab the sample slice
sample = get_sample(x, ix)
# Get the classification
y_hat[ix] = classify(art, sample, preprocessed=true, get_bmu=get_bmu)
end
return y_hat
end
"""
Checks the stopping conditions for an ART module.
# Arguments
- `art::ART`: the ART module to check stopping conditions for.
"""
function stopping_conditions(art::ARTModule)
return art.epoch >= art.opts.max_epoch
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1994 | """
common_docs.jl
# Description
Common docstrings for definitions in the package, to be included after those definitions are defined.
"""
# -----------------------------------------------------------------------------
# COMMON DOCUMENTATION
# -----------------------------------------------------------------------------
@doc """
Predict categories of a single sample of features 'x' using the ART model.
Returns predicted category 'y_hat.'
# Arguments
- `art::ARTModule`: ART or ARTMAP module to use for batch inference.
- `x::RealVector`: the single sample of features to classify.
- `preprocessed::Bool=false`: optional, flag if the data has already been complement coded or not.
- `get_bmu::Bool=false`: optional, flag if the model should return the best-matching-unit label in the case of total mismatch.
"""
classify(art::ARTModule, x::RealVector ; preprocessed::Bool=false, get_bmu::Bool=false)
# Common function for setting the threshold (sometimes just vigilance, sometimes a function of vigilance).
@doc """
Sets the match threshold of the ART/ARTMAP module as a function of the vigilance parameter.
Depending on selected ART/ARTMAP module and its options, this may be a function of other parameters as well.
# Arguments
- `art::ARTModule`: the ART/ARTMAP module for setting a new threshold.
"""
set_threshold!(art::ARTModule)
@doc """
Creates a category for the ARTModule module, expanding the weights and incrementing the category labels.
# Arguments
- `art::ARTModule`: the ARTModule module to add a category to.
- `x::RealVector`: the sample to use for adding a category.
- `y::Integer`: the new label for the new category.
"""
create_category!(art::ARTModule, x::RealVector, y::Integer)
"""
Common docstring for listing available match functions.
"""
const MATCH_FUNCTIONS_DOCS = join(MATCH_FUNCTIONS, ", ", " and ")
"""
Common docstring for listing available activation functions.
"""
const ACTIVATION_FUNCTIONS_DOCS = join(ACTIVATION_FUNCTIONS, ", ", " and ")
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1635 | """
conventions.jl
# Description
Constants defining conventions of the package along with abstract supertypes.
"""
# -----------------------------------------------------------------------------
# CONSTANTS AND CONVENTIONS
# -----------------------------------------------------------------------------
"""
AdaptiveResonance.jl convention for which 2-D dimension contains the feature dimension.
"""
const ART_DIM = 1
"""
AdaptiveResonance.jl convention for which 2-D dimension contains the number of samples.
"""
const ART_SAMPLES = 2
"""
The type of matrix used by the AdaptiveResonance.jl package, used to configure matrix growth behavior.
"""
const ARTMatrix = ElasticMatrix
"""
The type of vector used by the AdaptiveResonance.jl package, used to configure vector growth behvior.
"""
const ARTVector = Vector
# -----------------------------------------------------------------------------
# ABSTRACT TYPES
# -----------------------------------------------------------------------------
"""
Abstract supertype for all ART module options.
"""
abstract type ARTOpts end # ART module options
"""
Abstract supertype for both ART (unsupervised) and ARTMAP (supervised) modules.
"""
abstract type ARTModule end # ART modules
"""
Abstract supertype for all default unsupervised ART modules.
"""
abstract type ART <: ARTModule end # ART (unsupervised)
"""
Abstract supertype for all supervised ARTMAP modules.
"""
abstract type ARTMAP <: ARTModule end # ARTMAP (supervised)
"""
Acceptable iterators for ART module training and inference
"""
const ARTIterator = Union{UnitRange, ProgressBar}
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 2500 | """
docstrings.jl
# Description
A collection of common docstrings and docstring templates for the package.
"""
# -----------------------------------------------------------------------------
# DOCSTRING TEMPLATES
# -----------------------------------------------------------------------------
# Constants template
@template CONSTANTS =
"""
$(FUNCTIONNAME)
# Description
$(DOCSTRING)
"""
# Types template
@template TYPES =
"""
$(TYPEDEF)
# Summary
$(DOCSTRING)
# Fields
$(TYPEDFIELDS)
"""
# Template for functions, macros, and methods (i.e., constructors)
@template (FUNCTIONS, METHODS, MACROS) =
"""
$(TYPEDSIGNATURES)
# Summary
$(DOCSTRING)
# Method List / Definition Locations
$(METHODLIST)
"""
# -----------------------------------------------------------------------------
# COMMON DOCUMENTATION CONSTANTS
# -----------------------------------------------------------------------------
"""
Docstring prefix denoting that the constant is used as a common docstring element for other docstrings.
"""
const _COMMON_DOC = "Common docstring:"
"""
$(_COMMON_DOC) shared options docstring, inserted at the end of `opts_<...>` structs.
"""
const _OPTS_DOCSTRING = """
These options are a [`Parameters.jl`](https://github.com/mauro3/Parameters.jl) struct, taking custom options keyword arguments.
Each field has a default value listed below.
"""
"""
$(_COMMON_DOC) shared argument docstring for ART module arguments.
"""
const _ARG_ART = """
- `art::ARTModule`: the ARTModule module.
"""
"""
$(_COMMON_DOC) shared argument docstring for the input sample of features.
"""
const _ARG_X = """
- `x::RealVector`: the sample vector to use.
"""
"""
$(_COMMON_DOC) shared argument docstring for the weight vector.
"""
const _ARG_W = """
- `W::RealVector`: the weight vector to use.
"""
"""
$(_COMMON_DOC) shared argument docstring for the index of the weight column.
"""
const _ARG_INDEX = """
- `index::Integer`: the index of the weight column to use.
"""
"""
$(_COMMON_DOC) shared arguments string for methods using an ART module, sample 'x', and weight vector 'W'.
"""
const _ARG_ART_X_W = """
# Arguments
$(_ARG_ART)
$(_ARG_X)
$(_ARG_W)
"""
"""
$(_COMMON_DOC) shared arguments string for functions updating a column in a matrix.
"""
const _ARGS_MATRIX_REPLACE = """
# Arguments
- `mat::RealMatrix`: the matrix to update with a replaced column vector.
- `vec::RealVector`: the vector to put in the matrix at the column index.
- `index::Integer`: the column index to put the vector.
"""
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 784 | """
lib.jl
# Description
Aggregates all common types and functions that are used throughout `AdaptiveResonance.jl`.
# Authors
- Sasha Petrenko <[email protected]>
"""
# -----------------------------------------------------------------------------
# DEPENDENCIES
#
# NOTE: include order matters — docstrings.jl defines the templates and the
# `_ARG_*` constants that are interpolated by the later files (e.g. symbols.jl).
# -----------------------------------------------------------------------------

# Common docstrings and their templates
include("docstrings.jl")
# Abstract types and constants defining used types
include("conventions.jl")
# Algorithmic common types and functions
include("common.jl")
# Non-algorithmic low-level functions
include("subroutines.jl")
# Activation, match, and update functions and their drivers
include("symbols.jl")
# Common documentation of multiply-dispatched functions
include("common_docs.jl")
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1322 | """
subroutines.jl
# Description
Common low-level functions, such as for operating on matrices and vectors.
"""
# -----------------------------------------------------------------------------
# COMMON LOW-LEVEL FUNCTIONS
# -----------------------------------------------------------------------------
"""
Replaces a matrix element with a vector at the column index.
This function dispatches to the low-level replacement strategy.
$_ARGS_MATRIX_REPLACE
"""
function replace_mat_index!(mat::RealMatrix, vec::RealVector, index::Integer)
unsafe_replace_mat_index!(mat, vec, index)
end
"""
Low-level function for unsafely replacing a matrix column with a given vector.
$_ARGS_MATRIX_REPLACE
"""
function unsafe_replace_mat_index!(mat::RealMatrix, vec::RealVector, index::Integer)
@inbounds mat[:, index] = vec
end
"""
Extends a vector to a goal length with zeros of its element type to accommodate in-place updates.
# Arguments
- `vec::Vector{T}`: a vector of arbitrary element type.
- `goal_len::Integer`: the length that the vector should be.
"""
function accommodate_vector!(vec::Vector{T}, goal_len::Integer) where {T}
# While the the vector is not the correct length
while length(vec) < goal_len
# Push a zero of the type of the vector elements
push!(vec, zero(T))
end
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 4758 | """
symbols.jl
# Description
Symbols for macro evaluation of activation, match, and learning functions.
"""
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
"""
Low-level common function for computing the 1-norm of the element minimum of a sample and weights.
# Arguments
$(_ARG_X)
$(_ARG_W)
"""
function x_W_min_norm(x::RealVector, W::RealVector)
# return @inbounds norm(element_min(x, get_sample(W, index)), 1)
return norm(element_min(x, W), 1)
end
"""
Low-level common function for computing the 1-norm of just the weight vector.
# Arguments
$(_ARG_W)
"""
function W_norm(W::RealVector)
return norm(W, 1)
end
"""
Basic match function.
$(_ARG_ART_X_W)
"""
function basic_match(art::ARTModule, x::RealVector, W::RealVector)
# return norm(element_min(x, get_sample(W, index)), 1) / art.config.dim
return x_W_min_norm(x, W) / art.config.dim
end
"""
Unnormalized match function.
$(_ARG_ART_X_W)
"""
function unnormalized_match(_::ARTModule, x::RealVector, W::RealVector)
# return norm(element_min(x, get_sample(W, index)), 1) / art.config.dim
return x_W_min_norm(x, W)
end
"""
Simplified FuzzyARTMAP activation function.
$(_ARG_ART_X_W)
"""
function basic_activation(art::ARTModule, x::RealVector, W::RealVector)
# return norm(element_min(x, get_sample(W, index)), 1) / (art.opts.alpha + norm(get_sample(W, index), 1))
return x_W_min_norm(x, W) / (art.opts.alpha + W_norm(W))
end
"""
Low-level subroutine for the gamma match function with a precomputed gamma activation.
# Arguments
$(_ARG_ART)
$(_ARG_W)
- `gamma_act::Real`: the precomputed gamma activation value.
"""
function gamma_match_sub(art::ARTModule, W::RealVector, gamma_act::Real)
return (W_norm(W) ^ art.opts.gamma_ref) * gamma_act
end
"""
Gamma-normalized match function, recomputing the gamma activation value.
$(_ARG_ART_X_W)
"""
function gamma_match(art::ARTModule, x::RealVector, W::RealVector)
return gamma_match_sub(art, W, gamma_activation(art, x, W))
end
"""
Gamma-normalized match function, passing a precomputed gamma activation value.
$(_ARG_ART_X_W)
- `gamma_act::Real`: the precomputed gamma activation value.
"""
function gamma_match(art::ARTModule, _::RealVector, W::RealVector, gamma_act::Real)
return gamma_match_sub(art, W, gamma_act::Real)
end
"""
Gamma-normalized activation funtion.
$(_ARG_ART_X_W)
"""
function gamma_activation(art::ARTModule, x::RealVector, W::RealVector)
return basic_activation(art, x, W) ^ art.opts.gamma
end
"""
Default ARTMAP's choice-by-difference activation function.
$(_ARG_ART_X_W)
"""
function choice_by_difference(art::ARTModule, x::RealVector, W::RealVector)
return (
x_W_min_norm(x, W)
+ (1 - art.opts.alpha) * (art.config.dim - W_norm(W))
)
end
"""
Evaluates the match function of the ART/ARTMAP module on sample 'x' with weight 'W'.
Passes additional arguments for low-level optimizations using function dispatch.
# Arguments
$(_ARG_ART)
$(_ARG_X)
$(_ARG_INDEX)
"""
function art_match(art::ARTModule, x::RealVector, index::Integer, args...)
return eval(art.opts.match)(art, x, get_sample(art.W, index), args...)
end
"""
Evaluates the activation function of the ART/ARTMAP module on the sample 'x' with weight 'W'.
Passes additional arguments for low-level optimizations using function dispatch.
# Arguments
$(_ARG_ART)
$(_ARG_X)
$(_ARG_INDEX)
"""
function art_activation(art::ARTModule, x::RealVector, index::Integer, args...)
return eval(art.opts.activation)(art, x, get_sample(art.W, index), args...)
end
"""
Basic weight update function.
$(_ARG_ART_X_W)
"""
function basic_update(art::ARTModule, x::RealVector, W::RealVector)
return art.opts.beta * element_min(x, W) + W * (1.0 - art.opts.beta)
end
"""
Evaluates the ART module's learning/update method.
# Arguments
$(_ARG_ART)
$(_ARG_X)
$(_ARG_INDEX)
"""
function art_learn(art::ARTModule, x::RealVector, index::Integer)
return eval(art.opts.update)(art, x, get_sample(art.W, index))
end
# -----------------------------------------------------------------------------
# ENUMERATIONS
# -----------------------------------------------------------------------------
"""
Enumerates all of the update functions available in the package.
"""
const UPDATE_FUNCTIONS = [
:basic_update,
]
"""
Enumerates all of the match functions available in the package.
"""
const MATCH_FUNCTIONS = [
:basic_match,
:gamma_match,
]
"""
Enumerates all of the activation functions available in the package.
"""
const ACTIVATION_FUNCTIONS = [
:basic_activation,
:unnormalized_match,
:choice_by_difference,
:gamma_activation,
]
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 547 | """
runtests.jl
# Description
The entry point to unit tests for the AdaptiveResonance.jl package.
"""
# -----------------------------------------------------------------------------
# DEPENDENCIES
# -----------------------------------------------------------------------------

using SafeTestsets

# -----------------------------------------------------------------------------
# SAFETESTSETS
# -----------------------------------------------------------------------------

# Wrap the entire suite in one SafeTestsets environment so that test-local
# definitions do not leak into Main between runs.
@safetestset "All Test Sets" begin
    include("test_sets.jl")
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 956 | """
test_sets.jl
# Description
The main collection of tests for the AdaptiveResonance.jl package.
This file loads common utilities and aggregates all other unit tests files.
"""
using
    AdaptiveResonance,
    DelimitedFiles,
    Logging,
    Test

# Set the log level
# NOTE(review): this line only constructs a `LogLevel` value and discards it;
# it does not change the active logger's level. Something like
# `global_logger(ConsoleLogger(stderr, Logging.Info))` is probably what was
# intended — confirm.
LogLevel(Logging.Info)

# Auxiliary generic functions for loading data, etc.
include("test_utils.jl")

# Load the data once; `data` is used by the included test files below
data = load_iris("data/Iris.csv")

# @testset "AdaptiveResonance.jl" begin
#     # Module loading
#     include("modules.jl")
# end # @testset "AdaptiveResonance.jl"

@testset "AdaptiveResonance" begin
    @info "------- ADAPTIVERESONANCE TESTS -------"
    include("adaptiveresonance/adaptiveresonance_tests.jl")
end

@testset "ART" begin
    @info "------- ART TESTS -------"
    include("art/art_tests.jl")
end

@testset "ARTMAP" begin
    @info "------- ARTMAP TESTS -------"
    include("artmap/artmap_tests.jl")
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 4731 | """
test_utils.jl
# Description
A set of common struct and function utilities for AdaptiveResonance.jl unit tests.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
using
DelimitedFiles,
NumericalTypeAliases
# -----------------------------------------------------------------------------
# STRUCTS
# -----------------------------------------------------------------------------
"""
A basic struct for encapsulating the four components of supervised training.
"""
struct DataSplit
"""
The training feature samples.
Dimensions are `(feature-dim, sample-index)`.
"""
train_x::Matrix{Float}
"""
The testing feature samples.
Dimensions are `(feature-dim, sample-index)`.
"""
test_x::Matrix{Float}
"""
A vector of training labels.
"""
train_y::Vector{Int}
"""
A vector of testing labels.
"""
test_y::Vector{Int}
end
# -----------------------------------------------------------------------------
# METHODS
# -----------------------------------------------------------------------------
"""
Return a DataSplit struct that is split by the ratio (e.g. 0.8).
# Arguments
- `data_x::RealMatrix`: a 2-D matrix of samples with convention (features, samples).
- `data_y::RealVector`: a 1-D vector of integered labels.
- `ratio::Real`: the ratio for the train/test split β (0, 1).
"""
function DataSplit(data_x::RealMatrix, data_y::RealVector, ratio::Real)
_, n_data = size(data_x)
split_ind = Integer(floor(n_data*ratio))
train_x = data_x[:, 1:split_ind]
test_x = data_x[:, split_ind + 1:end]
train_y = data_y[1:split_ind]
test_y = data_y[split_ind + 1:end]
return DataSplit(train_x, test_x, train_y, test_y)
end
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
"""
Train and test an ART module.
# Arguments
- `art::ARTModule`: the ART or ARTMAP module to train and test.
- `data::DataSplit`: the struct containing a train/test split.
- `supervised::Bool=false`: flag for using supervised learning for ART modules (i.e., ARTMAP modules are always supervised).
- `train_opts::NamedTuple=NamedTuple()`: keyword options to pass to the `train!` function.
- `test_opts::NamedTuple=NamedTuple()`: keyword options to pass to the `classify` function.
"""
function train_test_art(
art::ARTModule,
data::DataSplit ;
supervised::Bool=false,
train_opts::NamedTuple=NamedTuple(),
test_opts::NamedTuple=NamedTuple()
)
# Default performance to undefined
perf = NaN
# If the module is unsupervised by default
if art isa ART
# Check if training with lazy supervision
if supervised
# Train with the supervised ART syntax
train!(art, data.train_x, y=data.train_y; train_opts...)
y_hat = classify(art, data.test_x; test_opts...)
# Calculate performance
perf = performance(y_hat, data.test_y)
# Otherwise, train in an unsupervised fashion
else
train!(art, data.train_x; train_opts...)
end
# Otherwise, necessarily train on a supervised model
elseif art isa ARTMAP
# Train and classify
train!(art, data.train_x, data.train_y; train_opts...)
y_hat = classify(art, data.test_x; test_opts...)
# Calculate performance
perf = performance(y_hat, data.test_y)
else
error("Incompatible ART module passed for testing")
end
# If the performance is not a NaN (potentially unsupervsied), then display perf
if !isnan(perf)
@info "$(typeof(art)): performance is $perf"
end
return perf
end
"""
Loads the iris dataset for testing and examples.
# Arguments
- `data_path::AbstractString`: path containing the Iris dataset.
- `split_ratio::Real = 0.8`: train/test split ration β (0, 1).
"""
function load_iris(data_path::AbstractString ; split_ratio::Real = 0.8)
raw_data = readdlm(data_path, ',')
labels = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
raw_x = Matrix{Real}(raw_data[2:end, 2:5])
raw_y_labels = raw_data[2:end, 6]
raw_y = Vector{Int}(undef, 0)
for ix in eachindex(raw_y_labels)
for jx in eachindex(labels)
if raw_y_labels[ix] == labels[jx]
push!(raw_y, jx)
end
end
end
# Julia is column-major, so use columns for features
raw_x = permutedims(raw_x)
# Create the datasplit object
data = DataSplit(raw_x, raw_y, split_ratio)
return data
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 356 | """
adaptiveresonance_tests.jl
# Description
Includes all of the AdaptiveResonance module tests.
"""
# One testset per package-level (non-module-specific) test file
@testset "Exceptions" begin
    include("exceptions.jl")
end

@testset "Initialization" begin
    include("initialization.jl")
end

@testset "Common" begin
    include("common.jl")
end

@testset "Performance" begin
    include("performance.jl")
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 2333 | """
common.jl
# Description
Tests of common code for the `AdaptiveResonance.jl` package.
"""
# Tests of the common utilities: data configuration, setup, and error handling
@testset "common" begin
    @info "------- Common Code Tests -------"

    # Example arrays (3 features, 2 samples)
    three_by_two = [1 2; 3 4; 5 6]

    # Test DataConfig constructors
    @info "Testing DataConfig..."
    dc1 = DataConfig()                  # Default constructor
    dc2 = DataConfig(0, 1, 2)           # When min and max are same across all features
    dc3 = DataConfig([0, 1], [2, 3])    # When min and max differ across features
    dc4 = DataConfig(three_by_two)      # When a data matrix is provided

    # # Test get_n_samples
    # @info "Testing get_n_samples..."
    # @test get_n_samples([1,2,3]) == 1          # 1-D array case
    # @test get_n_samples(three_by_two) == 2     # 2-D array case

    # Test data_setup! on both a module and a bare config
    @info "Testing data_setup!..."
    data_setup!(DDVFA(), three_by_two)
    data_setup!(DDVFA().config, three_by_two)

    # Test breaking situations
    @info "Testing common code error handling..."
    # Mismatched estimate/truth lengths must error
    @test_throws ErrorException performance([1,2],[1,2,3])
    # Re-running setup on an already-configured DataConfig should warn
    @test_logs (:warn,) AdaptiveResonance.data_setup!(dc3, three_by_two)
    # A config with min above max should make normalization error
    bad_config = DataConfig(1, 0, 3)
    @test_throws ErrorException linear_normalization(three_by_two, config=bad_config)
end
# Verify that the package's public constants are defined and exported
@testset "constants.jl" begin
    @info "------- Constants Tests -------"
    # Names of constants that the package must define and export
    art_constant_names = [
        :ADAPTIVERESONANCE_VERSION,
        :ART_MODULES,
        :ARTMAP_MODULES,
        :ADAPTIVERESONANCE_MODULES,
        :DDVFA_METHODS,
        :MATCH_FUNCTIONS,
        :ACTIVATION_FUNCTIONS,
    ]
    for name in art_constant_names
        # BUGFIX: `@test @isdefined local_constant` only checked that the loop
        # variable itself was defined, which is trivially true for every
        # iteration. Check the actual package binding and its export instead.
        @test isdefined(AdaptiveResonance, name)
        @test Base.isexported(AdaptiveResonance, name)
    end
end
# Smoke test: every module constructor must accept the common keyword arguments
@testset "kwargs" begin
    @info "------- Kwargs test -------"
    # Iterate over all modules
    for art in ADAPTIVERESONANCE_MODULES
        # Successful construction is the test; the instance itself is unused
        art_module = art(
            alpha=1e-3,
            display=false,
            sort=true,
        )
    end
end
# Verify that incremental (per-sample) training works after data_setup!
@testset "Incremental train!" begin
    # Create an FuzzyART module
    art = FuzzyART()

    # Create a small batch of data
    dim = 2
    n_samples = 3
    x = rand(dim, n_samples)

    # Setup the ART data config before any incremental call
    data_setup!(art, x)

    # Train incrementally, one sample at a time (without batch operation)
    for i = 1:n_samples
        # BUGFIX: previously this passed the full batch `x` every iteration,
        # exercising batch rather than incremental training; pass one sample
        # column instead.
        train!(art, x[:, i])
    end
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1985 | """
exceptions.jl
# Description
Tests the edge cases and exceptions of the entire `AdaptiveResonance.jl` package.
"""
# Mismatch testset
# Enough ART modules do not encounter mismatch during the normal training
# routines that these can be tested together.
@testset "Mismatch" begin
    @info "------- Mismatch test -------"

    # Instantiate every ART and ARTMAP module with display off
    arts = [art(display=false) for art in ART_MODULES]
    artmaps = [artmap(display=false) for artmap in ARTMAP_MODULES]

    # Train on disparate data (two samples at opposite corners)
    local_data = [
        0.0 1.0;
        0.0 1.0;
    ]
    local_labels = [1, 1]

    # Test on data that is still within range but equally far from other points
    test_data = [0.5, 0.5]

    # Get mismatch in unsupervised ART modules
    for art in arts
        train!(art, local_data)
        classify(art, test_data)
    end

    # Get mismatch in supervised ARTMAP modules
    for artmap in artmaps
        train!(artmap, local_data, local_labels)
        classify(artmap, test_data)
    end
end
# Error handling when training is initialized with bad or unconfigured data
@testset "init_train!" begin
    # Create a new FuzzyART module
    art = FuzzyART()
    # Test that the new module's data config is not setup
    @test art.config.setup == false
    # Test that initializing training fails if the data is not preprocessed
    # and the data config is not setup (using the RealVector function)
    x = rand(2)
    @test_throws ErrorException AdaptiveResonance.init_train!(x, art, false)
    # Create faulty data and say that it is preprocessed
    # (presumably an odd dimension cannot be complement-coded — confirm)
    x_cc_bad = rand(3)
    @test_throws ErrorException AdaptiveResonance.init_train!(x_cc_bad, art, true)
end

# Error handling when classification is initialized with unconfigured data
@testset "init_classify!" begin
    # Create a new FuzzyART module
    art = FuzzyART()
    # Test that the new module's data config is not setup
    @test art.config.setup == false
    # Test that initializing classification fails if the data is not
    # preprocessed and the data config is not setup
    @test_throws ErrorException AdaptiveResonance.init_classify!(rand(2, 2), art, false)
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 419 | """
initialization.jl
# Description
Contains tests for module initialization.
"""
# Smoke test: every module must construct with both its default and its
# explicit options-struct constructor
@testset "Initialization" begin
    # Default constructors
    fam = FAM()
    dam = DAM()
    sfam = SFAM()
    dvfa = DVFA()
    ddvfa = DDVFA()

    # Specify constructors (passing the corresponding options struct)
    fam_2 = FAM(opts_FAM())
    dam_2 = DAM(opts_DAM())
    sfam_2 = SFAM(opts_SFAM())
    dvfa_2 = DVFA(opts_DVFA())
    ddvfa_2 = DDVFA(opts_DDVFA())
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 2030 | """
performance.jl
# Description
A test of the performance of every ART and ARTMAP module.
"""
# Train and test every module across all option permutations, asserting a
# minimum supervised classification performance.
@testset "Training Test" begin
    @info "------- Training test -------"

    # All ART module types
    arts = ADAPTIVERESONANCE_MODULES
    n_arts = length(arts)

    # All common ART options
    art_opts = [
        # (display = true,),
        (display = false, sort = true,),
        (display = false, sort = false,),
    ]

    # Module-specific extra option permutations
    art_specifics = Dict(
        DDVFA => [
            (gamma_normalization=true,),
            (gamma_normalization=false,),
            (uncommitted=true,),
        ],
        FuzzyART => [
            (gamma_normalization=true,),
            (gamma_normalization=false,),
            (uncommitted=true,),
        ],
        SFAM => [
            (uncommitted=true,),
        ]
    )

    # All test option permutations
    test_opts = [
        (get_bmu = true,),
        (get_bmu = false,),
    ]
    n_test_opts = length(test_opts)

    # Performance baseline for all algorithms
    perf_baseline = 0.7

    # Iterate over all ART modules
    for ix = 1:n_arts
        # Iterate over all test options
        for jx = 1:n_test_opts
            # If we are testing a module with different options, merge
            if haskey(art_specifics, arts[ix])
                local_art_opts = vcat(art_opts, art_specifics[arts[ix]])
            else
                local_art_opts = art_opts
            end
            # Iterate over all options
            for kx in eachindex(local_art_opts)
                # Only do the unsupervised method for ART modules (not ARTMAP).
                # BUGFIX: `arts[ix]` is a module *type*, so `arts[ix] isa ART`
                # was always false and the unsupervised branch never ran;
                # subtyping (`<:`) is the correct check for a type.
                if arts[ix] <: ART
                    # Unsupervised
                    train_test_art(arts[ix](;local_art_opts[kx]...), data; test_opts=test_opts[jx])
                end
                # Supervised
                @test train_test_art(arts[ix](;local_art_opts[kx]...), data; supervised=true, test_opts=test_opts[jx]) >= perf_baseline
            end
        end
    end
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 188 | """
art_tests.jl
# Description
Includes all of the ART module tests.
"""
# One testset per ART-module test file
@testset "DDVFA" begin
    include("ddvfa.jl")
end

@testset "FuzzyART" begin
    include("fuzzyart.jl")
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 666 | """
ddvfa.jl
# Description
DDVFA-specific test sets.
"""
# Tests of the DDVFA weight-inspection convenience functions after training
@testset "Convenience functions" begin
    my_art = DDVFA()
    train!(my_art, data.train_x)

    # Convenience functions
    W = AdaptiveResonance.get_W(my_art)
    n_vec = AdaptiveResonance.get_n_weights_vec(my_art)
    n_weights = AdaptiveResonance.get_n_weights(my_art)
    n_F2 = length(my_art.F2)

    # Test these values
    @test ndims(W) == 1             # W is a list
    @test length(W) == n_F2         # W has n_F2 weights
    @test n_vec isa Vector          # n_vec is a vector
    @test length(n_vec) == n_F2     # n_vec describes n_F2 nodes
    @test n_weights isa Int         # n_weights is one number
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 1325 | """
fuzzyart.jl
# Description
FuzzyART-specific test sets.
"""
# FuzzyART-specific tests: training, activation/match computation, and the
# similarity methods shared with DDVFA
@testset "FuzzyART" begin
    @info "------- FuzzyART Test -------"

    # FuzzyART initialization and training
    my_FuzzyART = FuzzyART()
    train!(my_FuzzyART, data.train_x)

    # Compute a local sample for FuzzyART similarity method testing
    local_sample = complement_code(data.train_x[:, 1], config=my_FuzzyART.config)

    # Compute the local activation and match
    AdaptiveResonance.activation_match!(my_FuzzyART, local_sample)

    # Test that every similarity method computes for both the activation and
    # match variants (no value assertions here, just "does not throw")
    for method in DDVFA_METHODS
        results = Dict()
        # for field_name in field_names
        for activation in (true, false)
            results[activation] = AdaptiveResonance.similarity(method, my_FuzzyART, local_sample, activation)
            # @test isapprox(truth[method][field_name], results[field_name])
        end
        @info "Method: $method" results
    end

    # Check the error handling of the similarity function
    # Access the wrong similarity metric keyword ("asdf")
    # @test_throws ErrorException AdaptiveResonance.similarity("asdf", my_FuzzyART, "T", local_sample)
    # Access the wrong output function ("A")
    # @test_throws ErrorException AdaptiveResonance.similarity("centroid", my_FuzzyART, "A", local_sample)
end
|
[
"MIT"
] | 0.8.5 | 4ab15cc36eed95363a6aad32c88879f83f5183f3 | code | 146 | """
adaptiveresonance_tests.jl
# Description
Includes all of the ARTMAP tests.
"""
# ARTSCENE tests (the only ARTMAP-side test file included here)
@testset "ARTSCENE" begin
    include("artscene.jl")
end
| AdaptiveResonance | https://github.com/AP6YC/AdaptiveResonance.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.