code
stringlengths 1
13.8M
|
---|
ml_mcmc_parameter_index_matrix <- function(NM)
{
    # Build an NM x NM lower-triangular index matrix that numbers the
    # parameters row by row: row 1 gets 1, row 2 gets 2:3, row 3 gets 4:6, ...
    #
    # NM : number of rows/columns (number of parameter groups).
    #
    # Returns a list with:
    #   np   - total number of indexed parameters, NM * (NM + 1) / 2
    #   matr - the index matrix (upper triangle left as NA)
    m2 <- matrix(NA, nrow=NM, ncol=NM)
    hh <- 0
    # seq_len() instead of 1:NM so that NM = 0 yields an empty loop and an
    # empty result (np = 0, 0 x 0 matrix) rather than the out-of-bounds
    # subscript error that 1:0 -> c(1, 0) used to trigger.
    for (mm in seq_len(NM)){
        m2[mm, seq_len(mm)] <- hh + seq_len(mm)
        hh <- hh + mm
    }
    res <- list( np=hh, matr=m2)
    return(res)
}
library(hamcrest)
expected <- c(0x1.b965fef11710cp+4 + 0x1.3d577dbd23776p-1i, -0x1.a75fa6d581594p+4 + -0x1.5a6c8eadaad84p-1i,
0x1.d7ab334476eafp+4 + 0x1.772a1a412584ap-1i, -0x1.d90e7688ad63dp+5 + -0x1.9388dda619b34p-1i,
0x1.39c3c8b005518p+5 + 0x1.af81adfd2ec14p-1i, -0x1.8e87994fd3de5p+5 + -0x1.cb0d7a28bdb05p-1i,
0x1.7f32aafefc482p+5 + 0x1.e6254c95df783p-1i, -0x1.60d4dba51c8ccp+4 + -0x1.0061267f434dap+0i,
0x1.581fc789ac50ap+5 + 0x1.0d6ee11218a7cp+0i, -0x1.5ea8ae40d534ap+5 + -0x1.1a3889c1626c2p+0i,
0x1.6c0c7d588ad3bp+5 + 0x1.26bae57d9224p+0i, -0x1.dc9cd75d517b6p+4 + -0x1.32f2cb3a8b3a2p+0i,
0x1.53ebf4a54e143p+4 + 0x1.3edd24bc092abp+0i, -0x1.6d9cfb60907d1p+5 + -0x1.4a76ef5d44c02p+0i,
0x1.0a8b25c702f7dp+5 + 0x1.55bd3cd3a66b2p+0i, -0x1.661bf7a205e82p+5 + -0x1.60ad33ec5425p+0i,
0x1.0b096ae52d7f4p+6 + 0x1.6b4411446b137p+0i, -0x1.99899f7492644p+5 + -0x1.757f27fbb6bf8p+0i,
0x1.9ef89013e1b6dp+4 + 0x1.7f5be261b8002p+0i, -0x1.a2c8e5e1015d3p+4 + -0x1.88d7c29cd0a03p+0i,
0x1.08629be3d1d92p+5 + 0x1.91f0634b68eecp+0i, -0x1.1af6707bba2d7p+5 + -0x1.9aa3781ee6dcap+0i,
0x1.549e58bad18aep+4 + 0x1.a2eece704f805p+0i, -0x1.334a4d749ee06p+5 + -0x1.aad04dce6d2b8p+0i,
0x1.ab76b623af93dp+5 + 0x1.b245f88556719p+0i, -0x1.a3653b3599b8ep+5 + -0x1.b94dec1f33c54p+0i,
0x1.6a5d7900e42ep+4 + 0x1.bfe661de230ccp+0i, -0x1.10386b4e9eb32p+4 + -0x1.c60daf2f1ab0cp+0i,
0x1.433acf57f5b78p+5 + 0x1.cbc24615aeb05p+0i, -0x1.6aba3af588eb8p+4 + -0x1.d102b5909cd69p+0i,
0x1.1c42a0c39edd6p+5 + 0x1.d5cda9f706a41p+0i, -0x1.029073800e3bdp+5 + -0x1.da21ed4e40ef4p+0i,
0x1.4559d0bdbf09cp+5 + 0x1.ddfe67982464dp+0i, -0x1.0541971af2a04p+5 + -0x1.e1621f19ca402p+0i,
0x1.4cabb84e18263p+5 + 0x1.e44c389aa3ep+0i, -0x1.2bd362e214e9ep+5 + -0x1.e6bbf79bdd3bp+0i,
0x1.fffb34b7dab08p+4 + 0x1.e8b0be87fbd13p+0i, -0x1.11d5f2997e245p+5 + -0x1.ea2a0edaaecccp+0i,
0x1.21d6d895d4521p+4 + 0x1.eb278940c5831p+0i, -0x1.ee3b7cb8790ddp+5 + -0x1.eba8edb044c66p+0i,
0x1.b825a9cab1a8ep+5 + 0x1.ebae1b7894a78p+0i, -0x1.924682558e489p+5 + -0x1.eb37114ac2a34p+0i,
0x1.0589ba9e0dbd5p+5 + 0x1.ea43ed39d6626p+0i, -0x1.c7257d02841b3p+4 + -0x1.e8d4ecb338906p+0i,
0x1.31935a718ecap+5 + 0x1.e6ea6c6f2dfedp+0i, -0x1.63c2968f72606p+5 + -0x1.e484e8596aed2p+0i,
0x1.47e370d1557a9p+4 + 0x1.e1a4fb71c422dp+0i, -0x1.a98058bac3f46p+4 + -0x1.de4b5fa506703p+0i,
0x1.68011f86233e3p+5 + 0x1.da78ed9dfda04p+0i, -0x1.d974b640083dcp+2 + -0x1.d62e9c8eb746fp+0i,
0x1.3155af300c6c2p+4 + 0x1.d16d81f20f3b5p+0i, -0x1.3ecf7bbdc69f6p+5 + -0x1.cc36d14595079p+0i,
0x1.72f856552cab7p+5 + 0x1.c68bdbbbdcdbcp+0i, -0x1.c27981e06247ep+4 + -0x1.c06e0fe74edp+0i,
0x1.efe97390ceec4p+5 + 0x1.b9def95d8a772p+0i, -0x1.4946c3c928131p+5 + -0x1.b2e040537600ep+0i,
0x1.258c22d6b132p+4 + 0x1.ab73a93212221p+0i, -0x1.1f00600239306p+5 + -0x1.a39b14242e0cp+0i,
0x1.5ce8bf239549ep+5 + 0x1.9b587c9d17fa1p+0i, -0x1.3b2b3a0870a0cp+4 + -0x1.92adf8d869586p+0i,
0x1.31e3f7064507p+5 + 0x1.899db9530ea6cp+0i, -0x1.f2d8545874136p+4 + -0x1.802a083dad2cap+0i,
0x1.409c3d1fa88e2p+5 + 0x1.765548e88a9a6p+0i, -0x1.94ef624e375dap+4 + -0x1.6c21f7291ba04p+0i,
0x1.f3fc0019f1933p+4 + 0x1.6192a6b9609e2p+0i, -0x1.9b527f4245c03p+5 + -0x1.56aa02913926bp+0i,
0x1.3ff10b7ca3a38p+5 + 0x1.4b6acc39d824bp+0i, -0x1.85e8773299a49p+4 + -0x1.3fd7db1b84aeap+0i,
0x1.9143e339de226p+4 + 0x1.33f41bc5d409cp+0i, -0x1.98f9d2b38647ep+5 + -0x1.27c28f328baf8p+0i,
0x1.535e52dd4b508p+5 + 0x1.1b464a035afep+0i, -0x1.64d16cae1f29cp+4 + -0x1.0e8273ba9d632p+0i,
0x1.a97582bf422eep+4 + 0x1.017a45ef5813cp+0i, -0x1.212d3588afbf8p+4 + -0x1.e86216f94bdc7p-1i,
0x1.a3b6d0b14d3ccp+5 + 0x1.cd543f598da58p-1i, -0x1.b18d368a9177bp+4 + -0x1.b1d1dac01071fp-1i,
0x1.625eaeebb3103p+5 + 0x1.95e1dc5f7ab9p-1i, -0x1.51af3c2fa4a6p+5 + -0x1.798b531aff54ep-1i,
0x1.0f7c77b1081cap+5 + 0x1.5cd567bddec61p-1i, -0x1.7da5236165c32p+5 + -0x1.3fc75b2c5d49ep-1i,
0x1.742e559f936bap+5 + 0x1.2268848ea2641p-1i, -0x1.962c7dbee957ep+5 + -0x1.04c04f75f7f04p-1i,
0x1.3c44daf4f2b63p+5 + 0x1.cdac73f9c3daap-2i, -0x1.e238311626875p+4 + -0x1.9163a5c50d7b4p-2i,
0x1.d822b82a01c06p+4 + 0x1.54b56f43c6a28p-2i, -0x1.28ea9145c5ec3p+5 + -0x1.17b1250a8ca0ap-2i,
0x1.00f72f9c95c22p+5 + 0x1.b4cc62da3aaf4p-3i, -0x1.f49d09bc8cb43p+4 + -0x1.39c82132ad0e8p-3i,
0x1.bc84e00c1f5ecp+5 + 0x1.7ce932b6c6fd8p-4i, -0x1.d601dd04e0846p+5 + -0x1.0bc3ce61d9eep-5i,
0x1.e97bb2a33c612p+5 + -0x1.c51cdd4ff042p-6i, -0x1.912f8747197b8p+5 + 0x1.6853b7741f438p-4i,
0x1.b13ebaf381c38p+4 + -0x1.2f8297834c8acp-3i, -0x1.b6a785f577997p+4 + 0x1.aa8ea5711b5f4p-3i,
0x1.44167b97ee129p+5 + -0x1.129777a718d42p-2i, -0x1.861c5b161425bp+5 + 0x1.4fa23d0f92d06p-2i,
0x1.c33f67ab13a6ep+4 + -0x1.8c5836fb102ap-2i, -0x1.12196b04c6d66p+5 + 0x1.c8aa0eded959p-2i,
0x1.382bb1ecd8b67p+4 + -0x1.024443be362f9p-1i, -0x1.d716f0770adc9p+4 + 0x1.1ff2405d8d1ddp-1i,
0x1.55b2da3761fb8p+4 + -0x1.3d577dbd23727p-1i, -0x1.524d083dfecb7p+5 + 0x1.5a6c8eadaad61p-1i,
0x1.5053b761300a9p+5 + -0x1.772a1a41257d3p-1i, -0x1.583927584d8cap+5 + 0x1.9388dda619b36p-1i,
0x1.071cf9c902333p+5 + -0x1.af81adfd2ece9p-1i, -0x1.0c038ddecd1f7p+5 + 0x1.cb0d7a28bda79p-1i,
0x1.75e02a2f628a6p+5 + -0x1.e6254c95df6dcp-1i, -0x1.9367c923b6722p+4 + 0x1.0061267f434d4p+0i,
0x1.17f162d9fcf1cp+5 + -0x1.0d6ee11218aeep+0i, -0x1.22ebfd1efc36p+5 + 0x1.1a3889c162783p+0i,
0x1.1262c10f1c66ep+5 + -0x1.26bae57d922ap+0i, -0x1.58d3a2ca32c15p+5 + 0x1.32f2cb3a8b3f4p+0i,
0x1.a964fd03d91d8p+4 + -0x1.3edd24bc09308p+0i, -0x1.ac6c0981e358dp+5 + 0x1.4a76ef5d44ce4p+0i,
0x1.6218e6e12a3fep+5 + -0x1.55bd3cd3a67b7p+0i, -0x1.5393a07e5d858p+4 + 0x1.60ad33ec5426ap+0i,
0x1.218be1aa0a269p+5 + -0x1.6b4411446b14ep+0i, -0x1.a3e756f5c1d0fp+5 + 0x1.757f27fbb6bf9p+0i,
0x1.16d9a7d5f9f63p+5 + -0x1.7f5be261b802p+0i, -0x1.573711898e361p+4 + 0x1.88d7c29cd0a3p+0i,
0x1.21dc53244ed0ap+5 + -0x1.91f0634b68e76p+0i, -0x1.141630bafe132p+5 + 0x1.9aa3781ee6d5ep+0i,
0x1.72760ed30a0cdp+5 + -0x1.a2eece704f7cap+0i, -0x1.41be0afeb6439p+5 + 0x1.aad04dce6d2c1p+0i,
0x1.fe6fbc3dbaa3cp+4 + -0x1.b245f88556758p+0i, -0x1.1c2281238fb5dp+5 + 0x1.b94dec1f33c34p+0i,
0x1.5c0e9ca560b8cp+5 + -0x1.bfe661de230f8p+0i, -0x1.ceac3ce0f0c0ep+5 + 0x1.c60daf2f1ab41p+0i,
0x1.1fadc75aa1766p+5 + -0x1.cbc24615aeb98p+0i, -0x1.0e187cd952416p+5 + 0x1.d102b5909ce7cp+0i,
0x1.8be1a200d960ap+5 + -0x1.d5cda9f706a98p+0i, -0x1.3032d51e7c102p+5 + 0x1.da21ed4e40f3cp+0i,
0x1.4d6df1498439ap+5 + -0x1.ddfe67982469p+0i, -0x1.03889b3e1fd0fp+5 + 0x1.e1621f19ca442p+0i,
0x1.ccf4927b08456p+4 + -0x1.e44c389aa3e9ap+0i, -0x1.932cf364a8d1fp+5 + 0x1.e6bbf79bdd3ap+0i,
0x1.3c7c3f32a6c23p+5 + -0x1.e8b0be87fbcc4p+0i, -0x1.07bd515185a06p+5 + 0x1.ea2a0edaaec8ep+0i,
0x1.54de3a1e64464p+5 + -0x1.eb278940c587p+0i, -0x1.25dfd559c3e14p+5 + 0x1.eba8edb044cbep+0i,
0x1.a4af4f5616492p+5 + -0x1.ebae1b7894a1cp+0i, -0x1.958b5b1e09ac1p+5 + 0x1.eb37114ac2a2p+0i,
0x1.8df2574ea0ad3p+5 + -0x1.ea43ed39d663cp+0i, -0x1.bd0d96957d6ap+5 + 0x1.e8d4ecb3389a5p+0i,
0x1.49c1178cf9497p+5 + -0x1.e6ea6c6f2e0e4p+0i, -0x1.661ff44a955b7p+5 + 0x1.e484e8596af04p+0i,
0x1.02e334c1bd8fep+5 + -0x1.e1a4fb71c4276p+0i, -0x1.46fa0822ee72bp+5 + 0x1.de4b5fa506764p+0i,
0x1.a2580473795dap+5 + -0x1.da78ed9dfdaa6p+0i, -0x1.273978587b0bdp+4 + 0x1.d62e9c8eb7527p+0i,
0x1.62647b7bad5e6p+3 + -0x1.d16d81f20f3aep+0i, -0x1.122be47ce6df3p+5 + 0x1.cc36d14595066p+0i,
0x1.20353000e70fbp+5 + -0x1.c68bdbbbdcd9ap+0i, -0x1.c641ea57b4abep+4 + 0x1.c06e0fe74ed06p+0i,
0x1.9d7da29eba967p+3 + -0x1.b9def95d8a7a3p+0i, -0x1.1dad7b809bbf6p+5 + 0x1.b2e0405375fc2p+0i,
0x1.45881435e201dp+5 + -0x1.ab73a932121ep+0i, -0x1.724babbc16ed5p+5 + 0x1.a39b14242e0a1p+0i,
0x1.3ab018c238893p+5 + -0x1.9b587c9d17fe2p+0i, -0x1.f77607259c1dbp+4 + 0x1.92adf8d86964cp+0i,
0x1.4937d8aa29f48p+5 + -0x1.899db9530eacap+0i, -0x1.0ac245c5c3c5fp+5 + 0x1.802a083dad31ap+0i,
0x1.19b05b869d8ccp+3 + -0x1.765548e88aa18p+0i, -0x1.3918df776d561p+4 + 0x1.6c21f7291bac6p+0i,
0x1.0b36f23dd141p+5 + -0x1.6192a6b960abbp+0i, -0x1.51823667e47e8p+5 + 0x1.56aa02913928cp+0i,
0x1.69e13278a9385p+4 + -0x1.4b6acc39d825bp+0i, -0x1.0f1795a812ac5p+5 + 0x1.3fd7db1b84affp+0i,
0x1.297f32d0186edp+5 + -0x1.33f41bc5d40d4p+0i, -0x1.113fc6dd7b6d6p+5 + 0x1.27c28f328bb4fp+0i,
0x1.268d1626e824cp+5 + -0x1.1b464a035af39p+0i, -0x1.db275ca2fd404p+5 + 0x1.0e8273ba9d5b1p+0i,
0x1.b4a0ddb7f4259p+5 + -0x1.017a45ef580cep+0i, -0x1.416d35066bf1bp+5 + 0x1.e86216f94bdd8p-1i,
0x1.642e1955ac48p+5 + -0x1.cd543f598db93p-1i, -0x1.bf04aeccf459dp+5 + 0x1.b1d1dac0106f4p-1i,
0x1.88a9d914537fcp+5 + -0x1.95e1dc5f7abd6p-1i, -0x1.bcc78a899d058p+3 + 0x1.798b531aff5bp-1i,
0x1.49bdf84279e3ap+5 + -0x1.5cd567bdded8ap-1i, -0x1.0bca47952345cp+5 + 0x1.3fc75b2c5d6b1p-1i,
0x1.5053750f8b2c8p+5 + -0x1.2268848ea2723p-1i, -0x1.207888ce257e5p+5 + 0x1.04c04f75f7f84p-1i,
0x1.fbc993e5c34d4p+4 + -0x1.cdac73f9c3e9ap-2i, -0x1.5b330289f76efp+5 + 0x1.9163a5c50d9cap-2i,
0x1.3c078a1dbc302p+3 + -0x1.54b56f43c6c66p-2i, -0x1.c167949377abcp+3 + 0x1.17b1250a8c9ap-2i,
0x1.e72de280314bcp+4 + -0x1.b4cc62da3a98p-3i, -0x1.b94b594bc0febp+4 + 0x1.39c82132aced4p-3i,
0x1.575f336e56935p+5 + -0x1.7ce932b6c6f78p-4i, -0x1.e3eca0c75069cp+4 + 0x1.0bc3ce61da64p-5i,
0x1.562b79cbc0304p+5 + 0x1.c51cdd4ff136p-6i, -0x1.2cc964ce86c4dp+5 + -0x1.6853b7741f4bp-4i,
0x1.5b9ad38a6e923p+5 + 0x1.2f8297834c6cp-3i, -0x1.5c2cf0ad31ac2p+5 + -0x1.aa8ea5711b11cp-3i,
0x1.1f134bc642dd4p+5 + 0x1.129777a718a3p-2i, -0x1.db6b32c4e8849p+4 + -0x1.4fa23d0f92c6ep-2i,
0x1.28f6fad0204f1p+5 + 0x1.8c5836fb10183p-2i, -0x1.e61e8f0dc0406p+5 + -0x1.c8aa0eded93fcp-2i,
0x1.2f46d4bb58576p+5 + 0x1.024443be36199p-1i, -0x1.f7efddeb761c9p+4 + -0x1.1ff2405d8d048p-1i
)
assertThat(stats:::fft(inverse=TRUE,z=c(0+0i, -0.032413350884746-0.395985724495039i, -0.329248936013313+0.295925425748721i,
0.477173169178578+0.410835437506704i, -0.470864852191193+0.0020898113395i,
-0.395637669111166+0.09371630047074i, -0.390082255817687+0.058081027335136i,
0.297322818128679+0.139109774849282i, 0.245934701221508-0.519913703499104i,
-0.324992400837572-0.452962064350457i, -0.737690413913801+0.169844656876738i,
-0.234376831083589+0.653496325599457i, -0.657054027545147-0.198601521252393i,
0.113741154810212+0.455371403771566i, -0.30781824622239-1.27306743635963i,
-0.271696834361526-0.221455715504439i, 0.0142990279115924+0.0316923507925415i,
0.232486039383392-0.240809695275979i, -0.486370426368002+0.648268662437517i,
0.858668842822212+0.252012356124221i, 0.506800253430483+0.59438116616844i,
-0.333588446689667+0.88588241232328i, -0.361582093087892+0.259969958362385i,
0.512327736688256-0.088816983187171i, 0.282606283607878+0.473622010680636i,
-0.184904093113264-0.810883172795164i, 0.510364781341795+0.007217673016446i,
0.31514048613463-0.008738109889855i, 0.372309757327894-0.396923345486542i,
0.493129227486836+0.274659135651369i, 0.838506525466401-0.729513279510319i,
-0.596627848323625-0.241483536240807i, -0.182457218250056+0.252961943594378i,
0.268766182653571+0.575909158326938i, 0.595130038429572-0.403020039629623i,
0.271501811645802-0.244953376606825i, -0.365059137351186+0.616079022986633i,
0.654491660188509-0.418795964392674i, 0.844378271296845-0.214525817560491i,
-0.184512571106861-0.37602313164935i, -0.150551972688899+0.573706505551175i,
0.290309493709859-0.087755133731142i, 0.82180394758674-1.33649182064652i,
0.203052109393557-0.352347387606629i, -0.236643393414658+0.067162196583446i,
0.962760617302365+0.487201808686894i, -0.939641178516146-0.301471944642495i,
-1.29136056778701+0.99091873230636i, -1.0558358400708+0.79529187688265i,
0.315812349321178-0.452558843815238i, 0.558083109372165+0.394216348638022i,
1.58252718837965+0.10635929823983i, -0.723860324649444+0.09499467704508i,
-0.385671530463165-0.39227544466571i, 1.05665281904171+0.20200069854341i,
-0.313055482185121+0.563478566675619i, -0.782459601544091-0.103955130143651i,
-0.424150609627745+0.706317006145687i, -0.284144287182606-0.438700567101402i,
0.619007427583574+0.691193320499683i, 0.632524215846832+0.260242548393605i,
-0.80429832867806-1.02280005346484i, -0.048918775212354+0.218400718276831i,
-0.734342276226268-0.58629870013048i, -0.71175466177511-0.020587365815433i,
-0.127610195299029-0.727694696403183i, 0.654411201664163-0.711863277585737i,
-0.583192298784289-0.752324851554114i, -0.900164471986142+0.373665623363481i,
-0.260468437392966-0.489646773737359i, -0.364088005737045+0.412207684164289i,
-0.189440331654429-0.593817128377347i, -1.21538724087807+0.79512350346823i,
-0.05453550302005+1.02370218535713i, -0.58312602129959+0.270257444597416i,
0.492351425910926-0.709310402596133i, -0.01124865856325-1.10733867867924i,
0.524477849680165-0.609626579678407i, -0.261751601451768-0.879545849845574i,
-0.71706036895961-0.140572163988011i, -0.167991896582698+0.216639863576125i,
-0.249713256472457+0.20187516966261i, 0.327362517829653+0.649044594454464i,
-1.13859120925615-0.21720255393671i, -0.64108949607043+1.4310503818395i,
0.507542021817849-0.869313240271155i, 0.475744133219386+0.109714071587104i,
-0.278802911343235+0.838823380221168i, -1.59123253805249+0.46536117403087i,
0.152932829618307-0.495221522251877i, 0.09527524612609-0.554523411758624i,
0.06298107752425-1.03510510605306i, -0.35771417740566-0.574352614632856i,
1.09414487818611+0.13796715935197i, 0.176998602376203+0.955617499212537i,
0.02106283840174+1.08432668725375i, -0.423530803975363-0.709880435394784i,
0.624825542970224+0.473764558584155i, 0.4879237389953+0.44856849566119i,
-1.18872297895071+0i, 36.95629+0i, 0.629354224415147+0.619808129633136i,
0.4879237389953-0.44856849566119i, 0.624825542970224-0.473764558584156i,
-0.423530803975363+0.709880435394783i, 0.02106283840174-1.08432668725375i,
0.176998602376203-0.955617499212537i, 1.09414487818611-0.13796715935197i,
-0.357714177405661+0.574352614632857i, 0.06298107752425+1.03510510605306i,
0.09527524612609+0.554523411758624i, 0.152932829618307+0.495221522251877i,
-1.59123253805248-0.46536117403087i, -0.278802911343235-0.838823380221167i,
0.475744133219386-0.109714071587104i, 0.507542021817849+0.869313240271155i,
-0.64108949607043-1.4310503818395i, -1.13859120925615+0.21720255393671i,
0.327362517829653-0.649044594454464i, -0.249713256472457-0.20187516966261i,
-0.167991896582698-0.216639863576125i, -0.71706036895961+0.140572163988011i,
-0.261751601451768+0.879545849845574i, 0.524477849680165+0.609626579678406i,
-0.01124865856325+1.10733867867924i, 0.492351425910926+0.709310402596133i,
-0.583126021299589-0.270257444597416i, -0.05453550302005-1.02370218535713i,
-1.21538724087807-0.79512350346823i, -0.189440331654428+0.593817128377347i,
-0.364088005737045-0.412207684164289i, -0.260468437392966+0.489646773737359i,
-0.900164471986141-0.373665623363481i, -0.583192298784289+0.752324851554114i,
0.654411201664163+0.711863277585738i, -0.127610195299029+0.727694696403183i,
-0.71175466177511+0.020587365815433i, -0.734342276226269+0.58629870013048i,
-0.048918775212354-0.218400718276831i, -0.80429832867806+1.02280005346484i,
0.632524215846834-0.260242548393606i, 0.619007427583574-0.691193320499683i,
-0.284144287182606+0.438700567101402i, -0.424150609627745-0.706317006145686i,
-0.782459601544091+0.103955130143651i, -0.313055482185121-0.563478566675619i,
1.05665281904171-0.20200069854341i, -0.385671530463166+0.39227544466571i,
-0.723860324649445-0.09499467704508i, 1.58252718837965-0.10635929823983i,
0.558083109372165-0.394216348638022i, 0.315812349321178+0.452558843815238i,
-1.0558358400708-0.79529187688265i, -1.29136056778701-0.99091873230636i,
-0.939641178516146+0.301471944642495i, 0.962760617302365-0.487201808686894i,
-0.236643393414658-0.067162196583446i, 0.203052109393557+0.352347387606629i,
0.82180394758674+1.33649182064652i, 0.290309493709859+0.087755133731142i,
-0.150551972688899-0.573706505551175i, -0.184512571106861+0.376023131649349i,
0.844378271296845+0.214525817560491i, 0.65449166018851+0.418795964392675i,
-0.365059137351187-0.616079022986633i, 0.271501811645802+0.244953376606825i,
0.595130038429572+0.403020039629624i, 0.268766182653571-0.575909158326938i,
-0.182457218250056-0.252961943594378i, -0.596627848323625+0.241483536240807i,
0.838506525466401+0.729513279510319i, 0.493129227486836-0.274659135651368i,
0.372309757327894+0.396923345486543i, 0.31514048613463+0.008738109889855i,
0.510364781341795-0.007217673016446i, -0.184904093113264+0.810883172795164i,
0.282606283607878-0.473622010680636i, 0.512327736688256+0.088816983187171i,
-0.361582093087892-0.259969958362385i, -0.333588446689667-0.885882412323279i,
0.506800253430482-0.594381166168438i, 0.858668842822212-0.252012356124221i,
-0.486370426368002-0.648268662437517i, 0.232486039383392+0.240809695275979i,
0.0142990279115924-0.0316923507925415i, -0.271696834361526+0.221455715504439i,
-0.30781824622239+1.27306743635963i, 0.113741154810212-0.455371403771566i,
-0.657054027545147+0.198601521252393i, -0.234376831083589-0.653496325599458i,
-0.737690413913801-0.169844656876738i, -0.324992400837573+0.452962064350457i,
0.245934701221508+0.519913703499104i, 0.297322818128679-0.139109774849282i,
-0.390082255817687-0.058081027335136i, -0.395637669111166-0.09371630047074i,
-0.470864852191193-0.0020898113395i, 0.477173169178578-0.410835437506704i,
-0.329248936013313-0.295925425748721i, -0.032413350884746+0.395985724495039i
))
, identicalTo( expected, tol = 1e-6 ) ) |
"periodicTable" |
dm_nycflights13 <- function(cycle = FALSE, color = TRUE, subset = TRUE, compound = TRUE) {
  # Construct the example `dm` data model for the nycflights13 tables.
  #
  # cycle    : also add the flights$dest -> airports FK, which makes the
  #            relationship graph cyclic (added with check = FALSE).
  # color    : assign display colors to the tables.
  # subset   : use the small bundled subset instead of the full
  #            nycflights13 tables.
  # compound : add the compound (origin, time_hour) key linking flights
  #            to weather.
  if (subset) {
    data <- nycflights_subset()
    flights <- data$flights
    weather <- data$weather
    airlines <- data$airlines
    airports <- data$airports
    planes <- data$planes
  } else {
    flights <- nycflights13::flights
    weather <- nycflights13::weather
    airlines <- nycflights13::airlines
    airports <- nycflights13::airports
    planes <- nycflights13::planes
  }
  # Base model: single-column primary keys plus the single-column foreign
  # keys out of flights.
  dm <-
    dm(airlines, airports, flights, planes, weather) %>%
    dm_add_pk(planes, tailnum) %>%
    dm_add_pk(airlines, carrier) %>%
    dm_add_pk(airports, faa) %>%
    dm_add_fk(flights, tailnum, planes) %>%
    dm_add_fk(flights, carrier, airlines) %>%
    dm_add_fk(flights, origin, airports)
  if (compound) {
    dm <-
      dm %>%
      dm_add_pk(weather, c(origin, time_hour)) %>%
      dm_add_fk(flights, c(origin, time_hour), weather)
  }
  if (color) {
    # BUG FIX: the color assignments were garbled in this copy of the file --
    # each '"#RRGGBB" = <tables>' entry had been truncated to a lone quote,
    # presumably because everything after '#' was stripped as a comment.
    # Reconstructed below; TODO confirm the exact hex colors against the
    # upstream dm package sources.
    dm <-
      dm %>%
      dm_set_colors(
        "#5B9BD5" = flights,
        "#ED7D31" = starts_with("air"),
        "#70AD47" = c(planes, weather)
      )
  }
  if (cycle) {
    dm <-
      dm %>%
      dm_add_fk(flights, dest, airports, check = FALSE)
  }
  dm
}
nycflights_subset <- function() {
  # Load the small nycflights13 sample data set bundled with the dm package.
  rds_path <- system.file("extdata/nycflights13-small.rds", package = "dm")
  readRDS(rds_path)
}
# Add a slider (or a bank of sliders, one per element of `from`) to an rpanel
# control panel.  The slider is bound to panel variable `variable`; `action`
# is called with the updated panel whenever a slider moves.  Returns the
# panel name invisibly.
rp.slider <- function(panel, variable, from, to, action = I,
labels = NULL, names = NULL, title = NULL,
log = rep(FALSE, length(from)),
showvalue = FALSE, showvaluewidth = 4,
resolution = 0, initval = from, pos = NULL,
horizontal = TRUE, foreground = NULL, background = NULL, font = NULL,
parentname = deparse(substitute(panel)),
name = paste("slider", .nc(), sep=""), ...) {
panelname <- panel$panelname
# The panel variable is referenced by the *name* of the symbol passed in.
varname <- deparse(substitute(variable))
# Default labels: the variable name, suffixed 1..k for multi-sliders.
if (is.null(labels)) {
if (length(from) == 1)
labels <- varname
else
labels <- paste(varname, 1:length(from), sep = "")
}
# If the variable already exists in the panel, reuse its current value and
# (if available) its element names; otherwise start from `initval`.
if (!rp.isnull(panelname, varname)) {
variable <- rp.var.get(panelname, varname)
if (is.null(names)) {
if (!is.null(names(variable)))
names <- names(variable)
else
names <- labels
}
}
else {
if (is.null(names)) names <- labels
variable <- initval
}
names(variable) <- names
rp.var.put(panelname, varname, variable)
# Legacy calling convention: layout options may arrive via `...`.
if (is.null(pos) & length(list(...)) > 0) pos <- list(...)
# Callback run on every slider movement: store the new value (keeping the
# existing names), then let `action` transform the panel and save it back.
f <- function(val) {
valexisting <- rp.var.get(panelname, varname)
names(val) <- names(valexisting)
rp.var.put(panelname, varname, val)
panel <- rp.control.get(panelname)
panel <- action(panel)
rp.control.put(panelname, panel)
}
# Resolve the Tk parent: a named widget if one exists, else the panel
# itself; a pos$grid entry overrides both.
if (rp.widget.exists(panelname, parentname))
parent <- rp.widget.get(panelname, parentname)
else
parent <- panel
if (is.list(pos) && !is.null(pos$grid))
parent <- rp.widget.get(panelname, pos$grid)
widget <- w.slider(parent, initval = variable, from, to, action = f,
labels, names, title, log,
showvalue, showvaluewidth, resolution, pos, horizontal, foreground,
background, font)
rp.widget.put(panelname, name, widget)
if (.rpenv$savepanel) rp.control.put(panelname, panel)
invisible(panelname)
}
rp.slider.change <- function(panel, name, value, i = 1, do = TRUE) {
  # Programmatically move slider `name` (element `i` for multi-sliders) to
  # `value`; `do = TRUE` also fires the slider's callback.
  # Resolve the panel name: use the stored name when the panel is registered
  # in .rpenv, otherwise fall back to the symbol the caller passed in.
  panelname <- if (exists(panel$panelname, .rpenv, inherits = FALSE)) {
    panel$panelname
  } else {
    deparse(substitute(panel))
  }
  w.slider.change(rp.widget.get(panelname, name), value, i, do)
}
w.slider <- function(parent, initval, from, to, action = I,
    labels = deparse(substitute(initval)), names = labels, title = NULL,
    log = rep(FALSE, length(from)),
    showvalue = FALSE, showvaluewidth = 4, resolution = 0, pos = NULL,
    horizontal = TRUE, foreground = NULL, background = NULL, font = NULL) {
  # Low-level constructor for a bank of Tk scale (slider) widgets, one per
  # element of `from`/`to`.  Returns the widget record invisibly; the caller
  # (rp.slider) is responsible for registering it with the panel.
  #
  # `showvalue` may be FALSE, TRUE (use Tk's built-in readout) or the string
  # "widgets", in which case separate title/value text widgets are created
  # next to each slider and kept up to date from R.
  widget <- w.createwidget(parent, pos, background, title)
  widget$.type <- "sliders"
  widget$.showvalue <- showvalue
  widget$.showvaluewidth <- showvaluewidth
  widget$.log <- log
  widget$.var <- c()
  if (showvalue == "widgets") {
    widget$.text <- list()
    widget$.show <- list()
  }
  widget$.sl <- list()
  # Drag callback: read every slider's Tcl variable back into an R vector
  # (undoing the log transform where requested), refresh the value-display
  # widgets if present, then pass the named vector to `action`.
  f <- function(...) {
    variable <- c()
    for (j in (1:length(from))) {
      variable[j] <- as.numeric(handshake(tclvalue, widget$.var[[j]]))
      if (log[j]) variable[j] <- exp(variable[j])
      if (showvalue == "widgets")
        w.text.change(widget$.show[[j]], signif(variable[j], showvaluewidth))
    }
    names(variable) <- names
    if (length(from) == 1)
      action(as.numeric(variable))
    else
      action(variable)
  }
  widget$.f <- f
  orient <- if (horizontal) "horizontal" else "vertical"
  for (i in 1:length(from)) {
    # Log sliders operate on the log scale internally; only displayed
    # values are transformed back.
    if (log[i]) {
      wto <- log(to[i])
      wfrom <- log(from[i])
      initv <- log(initval[i])
    }
    else {
      wto <- to[i]
      wfrom <- from[i];
      initv <- initval[i]
    }
    widget$.var[i] <- list(handshake(tclVar, signif(initv, showvaluewidth)))
    if (showvalue != "widgets") {
      # Tk's built-in readout would show the log-scale value, so disable it
      # for log sliders.  NOTE(review): this reassignment persists into
      # later loop iterations -- presumably intentional; confirm upstream.
      if ((showvalue == TRUE) && log[i]) showvalue <- FALSE
      if (horizontal == TRUE) {
        pos=list(
          column=0,
          row=i-1,
          sticky="news",
          height= 2 * as.integer(handshake(.Tcl, 'font metrics systemfont -linespace')),
          cweight=1,
          rweight=1
        )
      }
      else {
        pos=list(
          column=i-1,
          row=0,
          sticky="news",
          width = 2 * as.integer(handshake(.Tcl, 'font metrics systemfont -linespace')),
          cweight=1,
          rweight=1
        )
      }
      sl <- w.createwidget(widget, pos=pos, background)
      sl$.type <- "slider"
      sl$.widget <- handshake(tkscale, widget$.handle, from = wfrom, to = wto,
        showvalue = showvalue, orient = orient, label = labels[i],
        resolution = resolution, variable = widget$.var[[i]])
      handshake(tkbind, sl$.widget, "<B1-Motion>", f)
      w.appearancewidget(sl, font, foreground, background)
      widget$.sl[i] <- list(sl)
    }
    else {
      # "widgets" mode: a title text, an unlabelled slider and a value
      # readout are created and laid out separately.
      if (horizontal) pos <- "left" else pos <- "top"
      text <- w.text(widget, title[i], NA,
        pos=list(
          column=0,
          row=i-1,
          sticky="w",
          cweight=1,
          height=as.integer(handshake(.Tcl, 'font metrics systemfont -linespace')),
          width = as.integer(handshake(.Tcl, paste('font measure systemfont "', title[i], '"', sep="") ))
        ), foreground, background, font)
      if (horizontal==TRUE) {
        pos=list(
          column=0,
          row=i-1,
          sticky="news",
          height= as.integer(handshake(.Tcl, 'font metrics systemfont -linespace')),
          cweight=100
        )
      }
      else {
        pos=list(
          column=i-1,
          row=0,
          sticky="news",
          width=as.integer(handshake(.Tcl, 'font metrics systemfont -linespace')),
          cweight=100
        )
      }
      sl <- w.createwidget(widget, pos=pos, background)
      sl$.type <- "slider"
      # BUG FIX: the original created this Tk scale twice back to back; the
      # first handle (which carried label = labels[i]) was immediately
      # overwritten by a second, label-less call, leaking an unmanaged Tk
      # widget.  Only the second scale was ever bound and displayed, so
      # keeping just that call preserves behaviour.
      sl$.widget <- handshake(tkscale, widget$.handle, from=wfrom, to=wto,
        showvalue=FALSE, orient=orient, resolution=resolution,
        variable=widget$.var[[i]])
      handshake(tkbind, sl$.widget, "<B1-Motion>", f)
      w.appearancewidget(sl, font, foreground, background)
      show <- w.text(widget, signif(initval, showvaluewidth), NA,
        pos=list(column=2, row=i-1, sticky="e", cweight=1),
        foreground, background, font, width=showvaluewidth+1)
      widget$.text[i] <- list(text)
      widget$.sl[i] <- list(sl)
      widget$.show[i] <- list(show)
    }
  }
  invisible(widget)
}
w.slider.change <- function(widget, value, i=1, do=TRUE) {
  # Push `value` into the i-th slider's Tcl variable (on the log scale when
  # that slider is logarithmic), refresh its value readout when one exists,
  # and optionally fire the widget's drag callback.
  tclvalue(widget$.var[[i]]) <- if (widget$.log[i]) log(value) else value
  if (widget$.showvalue == "widgets") {
    w.text.change(widget$.show[[i]], signif(value, widget$.showvaluewidth))
  }
  if (do) {
    widget$.f(widget$.var)
  }
}
"world_counts" |
read.crd.amber <- function(file, ...) {
  # Read an AMBER coordinate/restart (CRD) file.
  #
  # file : path to the AMBER CRD file.
  # ...  : currently unused; kept for interface compatibility.
  #
  # Returns an object of class c("amber", "crd") whose xyz component is
  # wrapped with as.xyz(); the time, velocities and box components are
  # dropped when absent from the file.  Stops with an error when the file
  # is missing or cannot be parsed by .read_crd().
  cl <- match.call()
  if(missing(file)) {
    # BUG FIX: the original messages were copy-pasted from read.pdb and
    # referred to PDB files; this reader handles AMBER CRD files.
    stop("read.crd.amber: please specify a CRD 'file' for reading")
  }
  if(!file.exists(file)) {
    stop("No input CRD file found: check filename")
  }
  crd <- .read_crd(file)
  if(!is.null(crd$error))
    stop(paste("Could not read", file))
  else
    class(crd) <- c("amber", "crd")
  # Drop optional components that the parser flagged as absent.
  if(is.na(crd$time))
    crd$time <- NULL
  if(!length(crd$velocities)>0)
    crd$velocities <- NULL
  if(!length(crd$box)>0)
    crd$box <- NULL
  crd$xyz <- as.xyz(crd$xyz)
  crd$call <- cl
  return(crd)
}
NULL
# For each action funded in row `n` of `solution`, re-solve the project
# prioritization problem with that action locked out and report the
# resulting cost, objective, and replacement cost (the change in objective
# caused by losing the action).  Unfunded actions get NA rows.
replacement_costs <- function(x, solution, n = 1) {
# Validate inputs: x must be a ProjectProblem with an objective, and
# `solution` must contain a complete, numeric column for every action.
assertthat::assert_that(
inherits(x, "ProjectProblem"),
inherits(solution, "data.frame"),
all(assertthat::has_name(solution, x$action_names())),
is.numeric(c(as.matrix(solution[, x$action_names()]))),
assertthat::noNA(c(as.matrix(solution[, x$action_names()]))),
assertthat::is.count(n),
is.finite(n),
isTRUE(n <= nrow(solution)))
assertthat::assert_that(!is.Waiver(x$objective),
msg = "argument to x does not have an objective specified.")
if (!inherits(solution, "tbl_df"))
solution <- tibble::as_tibble(solution)
# Use an exact (gap = 0) default solver so replacement costs are comparable.
suppressWarnings({x <- add_default_solver(x, gap = 0, verbose = FALSE)})
# Baseline objective value of the supplied solution row.
obj <- try(solution_statistics(x, solution[n, x$action_names()])$obj,
silent = TRUE)
if (inherits(obj, "try-error"))
stop("issue solving argument to x, please verify that it can be solved.")
# Indices of the actions funded in the solution (values > 0.5 count as on).
a <- which(c(as.matrix(solution[n, x$action_names()])) > 0.5)
# Re-solve with each funded action locked out; an infeasible/failed solve
# is recorded as Inf cost and objective.
out <- lapply(a, function(i) {
o <- try(solve(add_locked_out_constraints(x, i)), silent = TRUE)
if (inherits(o, "try-error")) {
o <- data.frame(cost = Inf, obj = Inf)
} else {
o <- o[, c("cost", "obj")]
}
o
})
out <- do.call(rbind, out)
out$name <- x$action_names()[a]
# Actions not funded in the solution get NA placeholder rows.
out <- rbind(out, tibble::tibble(name = x$action_names()[-a],
cost = NA_real_,
obj = NA_real_))
# Restore the problem's original action ordering.
out <- out[match(x$action_names(), out$name), , drop = FALSE]
out$rep_cost <- obj - out$obj
# Under a minimum-set objective a larger objective is worse, so flip sign.
if (inherits(x$objective, "MinimumSetObjective"))
out$rep_cost <- out$rep_cost * -1
out[, c("name", "cost", "obj", "rep_cost")]
} |
# Return gargle's own OAuth app.  `goa()` is defined elsewhere in the
# package -- presumably it constructs/returns the gargle OAuth client;
# confirm against the package internals.
gargle_app <- function() {
goa()
}
# Return the OAuth app shared by tidyverse packages.  Access is gated:
# check_permitted_package() inspects the calling package (via the caller's
# frame) and errors for packages that are not allowed to use the app.
tidyverse_app <- function() {
check_permitted_package(parent.frame())
toa()
} |
# Fit an autoregressive model to `x`, dispatching on `method`.  Note the
# empty `yw =,` switch arm: "yw" deliberately falls through to the
# "yule-walker" fitter.
ar <-
function (x, aic = TRUE, order.max = NULL,
method = c("yule-walker","burg", "ols", "mle", "yw"),
na.action = na.fail, series = deparse1(substitute(x)), ...)
{
# All fitters share the same core arguments; `...` is forwarded unchanged.
res <- switch(match.arg(method),
yw =,
"yule-walker" = ar.yw(x, aic = aic, order.max = order.max,
na.action = na.action, series = series, ...),
"burg" = ar.burg(x, aic = aic, order.max = order.max,
na.action = na.action, series = series, ...),
"ols" = ar.ols(x, aic = aic, order.max = order.max,
na.action = na.action, series = series, ...),
"mle" = ar.mle(x, aic = aic, order.max = order.max,
na.action = na.action, series = series, ...)
)
# Record the user's original call in the returned fit object.
res$call <- match.call()
res
}
ar.yw <- function(x, ...) UseMethod("ar.yw")
## Yule-Walker AR fit (default method).
## Univariate series go through the Levinson-Durbin recursion in Fortran
## (C_eureka) applied to the sample autocovariances; multivariate series
## use Whittle's generalization, coded in R below.  The order is chosen
## by minimum AIC over 0:order.max when aic = TRUE, otherwise order.max
## is used.  Returns an object of class "ar".
ar.yw.default <-
    function (x, aic = TRUE, order.max = NULL, na.action = na.fail,
              demean = TRUE, series = NULL, ...)
{
    if(is.null(series)) series <- deparse1(substitute(x))
    ists <- is.ts(x)
    x <- na.action(as.ts(x))
    if(ists) xtsp <- tsp(x)
    xfreq <- frequency(x)
    x <- as.matrix(x)
    if(!is.numeric(x))
        stop("'x' must be numeric")
    ## Internal NAs are tolerated only when whole rows are missing.
    if(any(is.na(x) != is.na(x[,1]))) stop("NAs in 'x' must be the same row-wise")
    nser <- ncol(x)
    if (demean) {
        xm <- colMeans(x, na.rm=TRUE)
        x <- sweep(x, 2L, xm, check.margin=FALSE)
    } else xm <- rep.int(0, nser)
    n.used <- nrow(x)
    n.obs <- sum(!is.na(x[,1]))
    ## Default maximal order: the classical 10*log10(n) rule.
    order.max <- if (is.null(order.max))
        min(n.obs - 1L, floor(10 * log10(n.obs))) else floor(order.max)
    if (order.max < 1L) stop("'order.max' must be >= 1")
    else if (order.max >= n.obs) stop("'order.max' must be < 'n.obs'")
    xacf <- acf(x, type = "covariance", lag.max = order.max, plot = FALSE,
                demean=demean, na.action = na.pass)$acf
    if(nser > 1L) {
        ## --- multivariate case: Whittle's recursion on the autocovariances
        snames <- colnames(x)
        A <- B <- array(0, dim = c(order.max + 1L, nser, nser))
        A[1L, , ] <- B[1L, , ] <- diag(nser)
        EA <- EB <- xacf[1L, , , drop = TRUE]   # forward/backward prediction variances
        partialacf <- array(dim = c(order.max, nser, nser))
        xaic <- numeric(order.max + 1L)
        ## One step of the recursion; updates A, B, EA, EB in the enclosing
        ## frame (hence the <<- assignments).
        solve.yw <- function(m) {
            betaA <- betaB <- 0
            for (i in 0L:m) {
                betaA <- betaA + A[i + 1L, , ] %*% xacf[m + 2L - i, , ]
                betaB <- betaB + B[i + 1L, , ] %*% t(xacf[m + 2L - i, , ])
            }
            KA <- -t(qr.solve(t(EB), t(betaA)))
            KB <- -t(qr.solve(t(EA), t(betaB)))
            EB <<- (diag(nser) - KB %*% KA) %*% EB
            EA <<- (diag(nser) - KA %*% KB) %*% EA
            Aold <- A
            Bold <- B
            for (i in seq_len(m + 1L)) {
                A[i + 1L, , ] <<- Aold[i + 1L, , ] + KA %*% Bold[m + 2L - i, , ]
                B[i + 1L, , ] <<- Bold[i + 1L, , ] + KB %*% Aold[m + 2L - i, , ]
            }
        }
        ## AIC for the order-m model: n * log|EA| + 2 * (#parameters).
        cal.aic <- function(m) {
            logdet <- determinant.matrix(EA)$modulus
            n.obs * logdet + 2 * m * nser * nser
        }
        ## Residuals for the selected order; the leading 'order' rows have
        ## no prediction and are returned as NA.
        cal.resid <- function() {
            resid <- array(0, dim = c(n.used - order, nser))
            for (i in 0L:order)
                resid <- resid +
                    tcrossprod(x[(order - i + 1L):(n.used - i), , drop = FALSE],
                               ar[i + 1L, , ])
            rbind(matrix(NA, order, nser), resid)
        }
        order <- 0L
        for (m in 0L:order.max) {
            xaic[m + 1L] <- cal.aic(m)
            ## Keep coefficients of the best order so far (or always, when
            ## aic = FALSE, so the final pass wins).
            if (!aic || xaic[m + 1L] == min(xaic[seq_len(m + 1L)])) {
                ar <- A
                order <- m
                var.pred <- EA * n.obs/(n.obs - nser * (m + 1L))
            }
            if (m < order.max) {
                solve.yw(m)
                partialacf[m + 1L, , ] <- -A[m + 2L, , ]
            }
        }
        xaic <- setNames(xaic - min(xaic), 0L:order.max)
        resid <- cal.resid()
        if(order) {
            ar <- -ar[2L:(order + 1L), , , drop = FALSE]
            dimnames(ar) <- list(seq_len(order), snames, snames)
        } else ar <- array(0, dim = c(0L, nser, nser),
                           dimnames = list(NULL, snames, snames))
        dimnames(var.pred) <- list(snames, snames)
        dimnames(partialacf) <- list(seq_len(order.max), snames, snames)
        colnames(resid) <- colnames(x)
    } else {
        ## --- univariate case: Levinson-Durbin via the 'eureka' Fortran code
        if (xacf[1L] == 0) stop("zero-variance series")
        r <- as.double(drop(xacf))
        z <- .Fortran(C_eureka, as.integer(order.max), r, r,
                      coefs = double(order.max^2),
                      vars = double(order.max),
                      double(order.max))
        coefs <- matrix(z$coefs, order.max, order.max)
        partialacf <- array(diag(coefs), dim = c(order.max, 1L, 1L))
        var.pred <- c(r[1L], z$vars)
        xaic <- n.obs * log(var.pred) + 2 * (0L:order.max) + 2 * demean
        ## NOTE(review): 'min(aic)' operates on the *logical* 'aic'
        ## argument, so maic is 0 or 1 and the else branch looks
        ## unreachable; this mirrors the long-standing upstream code --
        ## confirm before changing.
        maic <- min(aic)
        xaic <- setNames(if(is.finite(maic)) xaic - min(xaic) else
                         ifelse(xaic == maic, 0, Inf),
                         0L:order.max)
        order <- if (aic) (0L:order.max)[xaic == 0L] else order.max
        ar <- if (order) coefs[order, seq_len(order)] else numeric()
        var.pred <- var.pred[order + 1L]
        ## Bias correction for the innovation variance.
        var.pred <- var.pred * n.obs/(n.obs - (order + 1L))
        resid <- if(order) c(rep.int(NA, order), embed(x, order + 1L) %*% c(1, -ar))
                 else as.vector(x)
        if(ists) {
            attr(resid, "tsp") <- xtsp
            attr(resid, "class") <- "ts"
        }
    }
    res <- list(order = order, ar = ar, var.pred = var.pred, x.mean = drop(xm),
                aic = xaic, n.used = n.used, n.obs = n.obs, order.max = order.max,
                partialacf = partialacf, resid = resid, method = "Yule-Walker",
                series = series, frequency = xfreq, call = match.call())
    ## Asymptotic coefficient covariance matrix (univariate fits only).
    if(nser == 1L && order)
        res$asy.var.coef <- var.pred/n.obs *
            solve(toeplitz(drop(xacf)[seq_len(order)]))
    class(res) <- "ar"
    res
}
## Print method for "ar" objects: shows the call, the fitted coefficients
## (per-lag matrices for multivariate fits), an intercept when present,
## and the selected order with the estimated innovation variance.
## Returns its argument invisibly, as print methods should.
print.ar <- function(x, digits = max(3L, getOption("digits") - 3L), ...)
{
    cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
    nser <- NCOL(x$var.pred)
    if(nser > 1L) {
        ## Multivariate: permute so the lag is the last dimension and each
        ## lag prints as an nser x nser coefficient matrix.
        res <- x[c("ar", if(!is.null(x$x.intercept)) "x.intercept", "var.pred")]
        res$ar <- aperm(res$ar, c(2L,3L,1L))
        print(res, digits = digits)
    } else {
        if(x$order) {
            cat("Coefficients:\n")
            coef <- setNames(round(drop(x$ar), digits = digits),
                             seq_len(x$order))
            print.default(coef, print.gap = 2L)
        }
        ## Intercept (with its standard error) only exists for OLS fits.
        if(!is.null(xint <- x$x.intercept) && !is.na(xint))
            cat("\nIntercept: ", format(xint, digits = digits),
                " (", format(x$asy.se.coef$x.mean, digits = digits),
                ") ", "\n", sep = "")
        cat("\nOrder selected", x$order, " sigma^2 estimated as ",
            format(x$var.pred, digits = digits))
        cat("\n")
    }
    invisible(x)
}
## Predict from a fitted "ar" model.
## If 'newdata' is missing, the original series is re-evaluated from
## object$series in the caller's frame (re-applying any na.action stored
## in the fitting call).  Forecasts iterate the AR recursion n.ahead
## steps on the demeaned data; standard errors come from the psi-weights
## of the implied MA representation (univariate only).
predict.ar <- function(object, newdata, n.ahead = 1L, se.fit = TRUE, ...)
{
    if (n.ahead < 1L) stop("'n.ahead' must be at least 1")
    if(missing(newdata)) {
        ## Recover the original data from the stored series name.
        newdata <- eval.parent(str2lang(object$series))
        if (!is.null(nas <- object$call$na.action))
            newdata <- eval.parent(call(nas, newdata))
    }
    nser <- NCOL(newdata)
    ar <- object$ar
    p <- object$order
    st <- tsp(as.ts(newdata))[2L]
    dt <- deltat(newdata)
    xfreq <- frequency(newdata)
    tsp(newdata) <- NULL
    class(newdata) <- NULL
    if(NCOL(ar) != nser)
        stop("number of series in 'object' and 'newdata' do not match")
    n <- NROW(newdata)
    if(nser > 1L) {
        ## Multivariate: append n.ahead zero rows to the demeaned data and
        ## fill them in with the vector AR recursion.
        xint <- object$x.intercept %||% rep.int(0, nser)
        x <- rbind(sweep(newdata, 2L, object$x.mean, check.margin = FALSE),
                   matrix(rep.int(0, nser), n.ahead, nser, byrow = TRUE))
        pred <- if(p) {
            for(i in seq_len(n.ahead)) {
                x[n+i,] <- ar[1L,,] %*% x[n+i-1L,] + xint
                if(p > 1L) for(j in 2L:p)
                    x[n+i,] <- x[n+i,] + ar[j,,] %*% x[n+i-j,]
            }
            x[n + seq_len(n.ahead), ]
        } else matrix(xint, n.ahead, nser, byrow = TRUE)
        pred <- pred + matrix(object$x.mean, n.ahead, nser, byrow = TRUE)
        colnames(pred) <- colnames(object$var.pred)
        if(se.fit) {
            warning("'se.fit' not yet implemented for multivariate models")
            se <- matrix(NA, n.ahead, nser)
        }
    } else {
        xint <- object$x.intercept %||% 0
        x <- c(newdata - object$x.mean, rep.int(0, n.ahead))
        if(p) {
            for(i in seq_len(n.ahead))
                x[n+i] <- sum(ar * x[n+i - seq_len(p)]) + xint
            pred <- x[n + seq_len(n.ahead)]
            if(se.fit) {
                ## Forecast variance at horizon h is
                ## sigma^2 * sum_{j<h} psi_j^2, with psi the MA weights.
                psi <- .Call(C_ar2ma, ar, n.ahead - 1L)
                vars <- cumsum(c(1, psi^2))
                se <- sqrt(object$var.pred*vars)[seq_len(n.ahead)]
            }
        } else {
            pred <- rep.int(xint, n.ahead)
            if (se.fit) se <- rep.int(sqrt(object$var.pred), n.ahead)
        }
        pred <- pred + rep.int(object$x.mean, n.ahead)
    }
    pred <- ts(pred, start = st + dt, frequency = xfreq)
    if(se.fit)
        list(pred = pred, se = ts(se, start = st + dt, frequency = xfreq))
    else pred
}
## Exact maximum-likelihood AR fit (univariate only), implemented by
## repeatedly calling arima0() at each candidate order and picking the
## minimum-AIC fit when aic = TRUE.  Returns an object of class "ar".
ar.mle <- function (x, aic = TRUE, order.max = NULL, na.action = na.fail,
                    demean = TRUE, series = NULL, ...)
{
    if(is.null(series)) series <- deparse1(substitute(x))
    ists <- is.ts(x)
    if (!is.null(dim(x)))
        stop("MLE only implemented for univariate series")
    x <- na.action(as.ts(x))
    if(anyNA(x)) stop("NAs in 'x'")
    if(!is.numeric(x))
        stop("'x' must be numeric")
    if(ists) xtsp <- tsp(x)
    xfreq <- frequency(x)
    x <- as.vector(x)
    n.used <- length(x)
    ## Default maximal order, additionally capped at 12 because MLE fits
    ## are expensive.
    order.max <- if (is.null(order.max))
        min(n.used-1L, 12L, floor(10 * log10(n.used)))
    else round(order.max)
    if (order.max < 0L) stop ("'order.max' must be >= 0")
    else if (order.max >= n.used) stop("'order.max' must be < 'n.used'")
    if (aic) {
        ## Fit every order 0..order.max, keeping per-order coefficients,
        ## innovation variances and AIC values.
        coefs <- matrix(NA, order.max+1L, order.max+1L)
        var.pred <- numeric(order.max+1L)
        xaic <- numeric(order.max+1L)
        xm <- if(demean) mean(x) else 0
        coefs[1, 1L] <- xm
        var0 <- sum((x-xm)^2)/n.used
        var.pred[1L] <- var0
        ## Order-0 AIC put on the same (full-likelihood) scale as the
        ## AIC values returned by arima0().
        xaic[1L] <- n.used * log(var0) + 2 * demean + 2 + n.used + n.used * log(2 * pi)
        for(i in seq_len(order.max)) {
            fit <- arima0(x, order=c(i, 0L, 0L), include.mean=demean)
            coefs[i+1L, seq_len(i+demean)] <- fit$coef[seq_len(i+demean)]
            xaic[i+1L] <- fit$aic
            var.pred[i+1L] <- fit$sigma2
        }
        xaic <- setNames(xaic - min(xaic), 0L:order.max)
        order <- (0L:order.max)[xaic == 0L]
        ar <- coefs[order+1L, seq_len(order)]
        x.mean <- coefs[order+1L, order+1L]
        var.pred <- var.pred[order+1L]
    } else {
        ## Single fit at the requested order.
        order <- order.max
        fit <- arima0(x, order=c(order, 0L, 0L), include.mean=demean)
        coefs <- fit$coef
        if(demean) {
            ## arima0 appends the mean as the last coefficient.
            ar <- coefs[-length(coefs)]
            x.mean <- coefs[length(coefs)]
        } else {
            ar <- coefs
            x.mean <- 0
        }
        var.pred <- fit$sigma2
        xaic <- structure(0, names=order)
    }
    ## Residuals: the first 'order' values cannot be predicted.
    resid <- if(order) c(rep(NA, order), embed(x - x.mean, order+1L) %*% c(1, -ar))
             else x - x.mean
    if(ists) {
        attr(resid, "tsp") <- xtsp
        attr(resid, "class") <- "ts"
    }
    res <- list(order = order, ar = ar, var.pred = var.pred,
                x.mean = x.mean, aic = xaic,
                n.used = n.used, n.obs = n.used, order.max = order.max,
                partialacf = NULL, resid = resid, method = "MLE",
                series = series, frequency = xfreq, call = match.call())
    if(order) {
        ## Asymptotic covariance of the AR coefficient estimates.
        xacf <- acf(x, type = "covariance", lag.max = order, plot=FALSE)$acf
        res$asy.var.coef <- var.pred/n.used *
            solve(toeplitz(drop(xacf)[seq_len(order)]))
    }
    class(res) <- "ar"
    res
}
## Fit an AR model by unconstrained ordinary least squares, optionally
## with an intercept.  Columns are rescaled to unit variance for
## numerical stability and all returned quantities are mapped back to
## the original scale.  Order selection minimises an AIC-type criterion
## when aic = TRUE.  Returns an object of class "ar".
ar.ols <- function (x, aic = TRUE, order.max = NULL, na.action = na.fail,
                    demean = TRUE, intercept = demean, series = NULL, ...)
{
    if(is.null(series)) series <- deparse1(substitute(x))
    rescale <- TRUE
    ists <- is.ts(x)
    x <- na.action(as.ts(x))
    if(anyNA(x)) stop("NAs in 'x'")
    if(ists) xtsp <- tsp(x)
    xfreq <- frequency(x)
    x <- as.matrix(x)
    if(!is.numeric(x))
        stop("'x' must be numeric")
    n.used <- nrow(x)
    nser <- ncol(x)
    iser <- seq_len(nser)
    if(rescale) {
        ## Scale each series to unit standard deviation (constant columns
        ## are left alone); undone again at the end.
        sc <- sqrt(drop(apply(x, 2L, var)))
        sc[sc == 0] <- 1
        x <- x/rep.int(sc, rep.int(n.used, nser))
    } else sc <- rep.int(1, nser)
    order.max <- if (is.null(order.max))
        min(n.used-1L, floor(10 * log10(n.used))) else round(order.max)
    if (order.max < 0L) stop("'order.max' must be >= 0")
    if (order.max >= n.used) stop("'order.max' must be < 'n.used'")
    order.min <- if (aic) 0L else order.max
    varE <- seA <- A <- vector("list", order.max - order.min + 1L)
    xaic <- rep.int(Inf, order.max - order.min + 1L)
    ## Local determinant via the QR decomposition, clamped at zero;
    ## deliberately shadows base::det inside this function.
    det <- function(x) max(0, prod(diag(qr(x)$qr))*(-1)^(ncol(x)-1))
    if(demean) {
        xm <- colMeans(x)
        x <- sweep(x, 2L, xm, check.margin=FALSE)
    } else xm <- rep.int(0, nser)
    ## Fit every candidate order by multivariate least squares.
    for (m in order.min:order.max)
    {
        y <- embed(x, m+1L)
        ## Regressor matrix: lagged values, plus a column of ones for the
        ## intercept when requested.
        X <-
            if(intercept) {
                if(m) cbind(rep.int(1,nrow(y)), y[, (nser+1L):ncol(y)])
                else as.matrix(rep.int(1, nrow(y)))
            } else {
                if(m) y[, (nser+1L):ncol(y)] else matrix(0, nrow(y), 0)
            }
        Y <- t(y[, iser])
        N <- ncol(Y)
        XX <- t(X)%*%X
        rank <- qr(XX)$rank
        if (rank != nrow(XX))
        {
            ## Singular design: abandon higher orders.
            warning(paste("model order: ", m,
                          "singularities in the computation of the projection matrix",
                          "results are only valid up to model order", m - 1L),
                    domain = NA)
            break
        }
        P <- if(ncol(XX) > 0) solve(XX) else XX
        A[[m - order.min + 1L]] <- Y %*% X %*% P
        YH <- A[[m - order.min + 1L]] %*% t(X)
        E <- (Y - YH)
        varE[[m - order.min + 1L]] <- tcrossprod(E)/N
        ## Coefficient standard errors from the Kronecker-product
        ## covariance of the vectorized coefficient matrix.
        varA <- P %x% (varE[[m - order.min + 1L]])
        seA[[m - order.min+1L]] <-
            if(ncol(varA) > 0) sqrt(diag(varA)) else numeric()
        xaic[m - order.min+1L] <-
            n.used*log(det(varE[[m-order.min+1L]])) + 2*nser*(nser*m+intercept)
    }
    ## Selected order: first AIC minimum, or the requested maximum.
    m <- if(aic) which.max(xaic == min(xaic)) + order.min - 1L else order.max
    y <- embed(x, m+1L)
    AA <- A[[m - order.min + 1L]]
    if(intercept) {
        xint <- AA[, 1L]
        ar <- AA[, -1L]
        X <- if(m) cbind(rep.int(1,nrow(y)), y[, (nser+1L):ncol(y)])
             else as.matrix(rep.int(1, nrow(y)))
    } else {
        X <- if(m) y[, (nser+1L):ncol(y)] else matrix(0, nrow(y), 0L)
        xint <- NULL
        ar <- AA
    }
    Y <- t(y[, iser, drop = FALSE])
    YH <- AA %*% t(X)
    ## Residuals; first m rows have no prediction.
    E <- drop(rbind(matrix(NA, m, nser), t(Y - YH)))
    ## NOTE(review): 'min(aic)' operates on the *logical* 'aic' argument,
    ## so maic is 0 or 1 and the else branch looks unreachable; mirrors
    ## the upstream code -- confirm before changing.
    maic <- min(aic)
    xaic <- setNames(if(is.finite(maic)) xaic - min(xaic) else
                     ifelse(xaic == maic, 0, Inf), order.min:order.max)
    dim(ar) <- c(nser, nser, m)
    ar <- aperm(ar, c(3L,1L,2L))
    ses <- seA[[m - order.min + 1L]]
    if(intercept) {
        sem <- ses[iser]
        ses <- ses[-iser]
    } else sem <- rep.int(0, nser)
    dim(ses) <- c(nser, nser, m)
    ses <- aperm(ses, c(3L,1L,2L))
    var.pred <- varE[[m - order.min + 1L]]
    if(nser > 1L) {
        snames <- colnames(x)
        dimnames(ses) <- dimnames(ar) <- list(seq_len(m), snames, snames)
        dimnames(var.pred) <- list(snames, snames)
        names(sem) <- colnames(E) <- snames
    } else {
        var.pred <- drop(var.pred)
    }
    if(ists) {
        attr(E, "tsp") <- xtsp
        attr(E, "class") <- "ts"
    }
    if(rescale) {
        ## Undo the initial unit-variance rescaling on every returned
        ## quantity.
        xm <- xm * sc
        if(!is.null(xint)) xint <- xint * sc
        aa <- outer(sc, 1/sc)
        if(nser > 1L && m) for(i in seq_len(m)) ar[i,,] <- ar[i,,]*aa
        var.pred <- var.pred * drop(outer(sc, sc))
        E <- E * rep.int(sc, rep.int(NROW(E), nser))
        sem <- sem*sc
        if(m)
            for(i in seq_len(m)) ses[i,,] <- ses[i,,]*aa
    }
    res <- list(order = m, ar = ar, var.pred = var.pred,
                x.mean = xm, x.intercept = xint, aic = xaic,
                n.used = n.used, n.obs = n.used, order.max = order.max,
                partialacf = NULL, resid = E, method = "Unconstrained LS",
                series = series, frequency = xfreq, call = match.call(),
                asy.se.coef = list(x.mean = sem, ar = drop(ses)))
    class(res) <- "ar"
    res
}
## Yule-Walker AR fit for multivariate time series via the compiled
## multi_yw routine (Whittle's recursion in C).  Returns an "ar" object.
ar.yw.mts <-
    function (x, aic = TRUE, order.max = NULL, na.action = na.fail,
              demean = TRUE, series = NULL, var.method = 1L, ...)
{
    if (is.null(series)) series <- deparse1(substitute(x))
    if (ists <- is.ts(x)) xtsp <- tsp(x)
    x <- na.action(as.ts(x))
    ## Internal NAs are tolerated only when whole rows are missing.
    if(any(is.na(x) != is.na(x[,1]))) stop("NAs in 'x' must be the same row-wise")
    if (ists) xtsp <- tsp(x)
    xfreq <- frequency(x)
    x <- as.matrix(x)
    nser <- ncol(x)
    n.used <- nrow(x)
    n.obs <- sum(!is.na(x[,1]))
    if (demean) {
        x.mean <- colMeans(x, na.rm=TRUE)
        x <- sweep(x, 2L, x.mean, check.margin=FALSE)
    }
    else x.mean <- rep(0, nser)
    order.max <- floor(order.max %||% (10 * log10(n.obs)))
    if (order.max < 1L)
        stop("'order.max' must be >= 1")
    xacf <- acf(x, type = "cov", plot = FALSE,
                lag.max = order.max, na.action = na.pass)$acf
    ## The recursion, order selection and AIC are computed in C; results
    ## come back as flat vectors that are reshaped below.
    z <- .C(C_multi_yw,
            aperm(xacf, 3:1),
            as.integer(n.obs),
            as.integer(order.max),
            as.integer(nser),
            coefs = double((1L + order.max) * nser * nser),
            pacf = double((1L + order.max) * nser * nser),
            var = double((1L + order.max) * nser * nser),
            aic = double(1L + order.max),
            order = integer(1L),
            as.integer(aic))
    partialacf <- aperm(array(z$pacf, dim = c(nser, nser, order.max + 1L)), 3:1)[-1L, , , drop = FALSE]
    var.pred <- aperm(array(z$var, dim = c(nser, nser, order.max + 1L)), 3:1)
    xaic <- setNames(z$aic - min(z$aic), 0:order.max)
    order <- z$order
    resid <- x
    if (order > 0) {
        ## Reconstruct residuals from the selected coefficients; the
        ## first 'order' rows have no prediction and become NA.
        ar <- -aperm(array(z$coefs, dim = c(nser, nser, order.max + 1L)), 3:1)[2L:(order + 1L), , , drop = FALSE]
        for (i in 1L:order)
            resid[-(1L:order), ] <- resid[-(1L:order),] - x[(order - i + 1L):(n.used - i), ] %*% t(ar[i, , ])
        resid[1L:order, ] <- NA
    }
    else ar <- array(dim = c(0, nser, nser))
    ## Bias-corrected innovation covariance for the chosen order.
    var.pred <- var.pred[order + 1L, , , drop = TRUE] * n.obs/(n.obs - nser * (demean + order))
    if (ists) {
        attr(resid, "tsp") <- xtsp
        attr(resid, "class") <- c("mts", "ts")
    }
    snames <- colnames(x)
    colnames(resid) <- snames
    dimnames(ar) <- list(seq_len(order), snames, snames)
    dimnames(var.pred) <- list(snames, snames)
    dimnames(partialacf) <- list(1L:order.max, snames, snames)
    res <- list(order = order, ar = ar, var.pred = var.pred,
                x.mean = x.mean, aic = xaic, n.used = n.used, n.obs = n.obs, order.max = order.max,
                partialacf = partialacf, resid = resid, method = "Yule-Walker",
                series = series, frequency = xfreq, call = match.call())
    class(res) <- "ar"
    res
}
ar.burg <- function(x, ...) UseMethod("ar.burg")
## Burg's method for univariate AR fitting, via the compiled C_Burg
## routine.  var.method selects between the two Burg innovation-variance
## estimates (1 = "Burg", otherwise "Burg2").  Returns an "ar" object.
ar.burg.default <-
    function (x, aic = TRUE, order.max = NULL, na.action = na.fail,
              demean = TRUE, series = NULL, var.method = 1L, ...)
{
    if(is.null(series)) series <- deparse1(substitute(x))
    if (ists <- is.ts(x)) xtsp <- tsp(x)
    x <- na.action(as.ts(x))
    if(anyNA(x)) stop("NAs in 'x'")
    if (ists) xtsp <- tsp(x)
    xfreq <- frequency(x)
    x <- as.vector(x)
    if (demean) {
        x.mean <- mean(x)
        x <- x - x.mean
    } else x.mean <- 0
    n.used <- length(x)
    ## Default maximal order: the classical 10*log10(n) rule.
    order.max <- if (is.null(order.max))
        min(n.used-1L, floor(10 * log10(n.used)))
    else floor(order.max)
    if (order.max < 1L) stop("'order.max' must be >= 1")
    else if (order.max >= n.used) stop("'order.max' must be < 'n.used'")
    xaic <- numeric(order.max + 1L)
    ## C_Burg returns (coefficients, variance estimate 1, variance
    ## estimate 2) for all orders up to order.max.
    z <- .Call(C_Burg, x, order.max)
    coefs <- matrix(z[[1L]], order.max, order.max)
    partialacf <- array(diag(coefs), dim = c(order.max, 1L, 1L))
    var.pred <- if(var.method == 1L) z[[2L]] else z[[3L]]
    if (any(is.nan(var.pred))) stop("zero-variance series")
    xaic <- n.used * log(var.pred) + 2 * (0L:order.max) + 2 * demean
    ## NOTE(review): 'min(aic)' operates on the *logical* 'aic' argument,
    ## so maic is 0 or 1 and the else branch looks unreachable; mirrors
    ## the upstream code -- confirm before changing.
    maic <- min(aic)
    xaic <- setNames(if(is.finite(maic)) xaic - min(xaic) else
                     ifelse(xaic == maic, 0, Inf), 0L:order.max)
    order <- if (aic) (0L:order.max)[xaic == 0] else order.max
    ar <- if (order) coefs[order, 1L:order] else numeric()
    var.pred <- var.pred[order + 1L]
    ## Residuals: the first 'order' values cannot be predicted.
    resid <- if(order) c(rep(NA, order), embed(x, order+1L) %*% c(1, -ar))
             else x
    if(ists) {
        attr(resid, "tsp") <- xtsp
        attr(resid, "class") <- "ts"
    }
    res <- list(order = order, ar = ar, var.pred = var.pred, x.mean = x.mean,
                aic = xaic, n.used = n.used, n.obs = n.used, order.max = order.max,
                partialacf = partialacf, resid = resid,
                method = ifelse(var.method==1L,"Burg","Burg2"),
                series = series, frequency = xfreq, call = match.call())
    if(order) {
        ## Asymptotic covariance of the AR coefficient estimates.
        xacf <- acf(x, type = "covariance", lag.max = order, plot = FALSE)$acf
        res$asy.var.coef <- solve(toeplitz(drop(xacf)[seq_len(order)]))*var.pred/n.used
    }
    class(res) <- "ar"
    res
}
## Burg's method for multivariate time series via the compiled
## multi_burg routine, which performs the recursion, order selection and
## residual computation in a single C call.  Returns an "ar" object.
ar.burg.mts <-
    function (x, aic = TRUE, order.max = NULL, na.action = na.fail,
              demean = TRUE, series = NULL, var.method = 1L, ...)
{
    if (is.null(series))
        series <- deparse1(substitute(x))
    if (ists <- is.ts(x))
        xtsp <- tsp(x)
    x <- na.action(as.ts(x))
    if (anyNA(x))
        stop("NAs in 'x'")
    if (ists)
        xtsp <- tsp(x)
    xfreq <- frequency(x)
    x <- as.matrix(x)
    nser <- ncol(x)
    n.used <- nrow(x)
    if (demean) {
        x.mean <- colMeans(x)
        x <- sweep(x, 2L, x.mean, check.margin = FALSE)
    }
    else x.mean <- rep(0, nser)
    order.max <- floor(if(is.null(order.max)) 10 * log10(n.used) else order.max)
    ## Flat result vectors from C are reshaped into arrays below.
    z <- .C(C_multi_burg,
            as.integer(n.used),
            resid = as.double(x),
            as.integer(order.max),
            as.integer(nser),
            coefs = double((1L + order.max) * nser * nser),
            pacf = double((1L + order.max) * nser * nser),
            var = double((1L + order.max) * nser * nser),
            aic = double(1L + order.max),
            order = integer(1L),
            as.integer(aic),
            as.integer(var.method))
    partialacf <-
        aperm(array(z$pacf, dim = c(nser, nser, order.max + 1L)), 3:1)[-1L, , , drop = FALSE]
    var.pred <- aperm(array(z$var, dim = c(nser, nser, order.max + 1L)), 3:1)
    xaic <- setNames(z$aic - min(z$aic), 0:order.max)
    order <- z$order
    ar <- if (order)
        -aperm(array(z$coefs, dim = c(nser, nser, order.max + 1L)), 3:1)[2L:(order + 1L), , , drop = FALSE]
    else array(dim = c(0, nser, nser))
    var.pred <- var.pred[order + 1L, , , drop = TRUE]
    resid <- matrix(z$resid, nrow = n.used, ncol = nser)
    ## The first 'order' rows have no prediction.
    if (order) resid[seq_len(order), ] <- NA
    if (ists) {
        attr(resid, "tsp") <- xtsp
        attr(resid, "class") <- "mts"
    }
    snames <- colnames(x)
    colnames(resid) <- snames
    dimnames(ar) <- list(seq_len(order), snames, snames)
    dimnames(var.pred) <- list(snames, snames)
    dimnames(partialacf) <- list(seq_len(order.max), snames, snames)
    res <- list(order = order, ar = ar, var.pred = var.pred, x.mean = x.mean,
                aic = xaic, n.used = n.used, n.obs = n.used, order.max = order.max,
                partialacf = partialacf, resid = resid,
                method = ifelse(var.method == 1L, "Burg", "Burg2"),
                series = series, frequency = xfreq,
                call = match.call())
    class(res) <- "ar"
    res
}
## Compute a suite of LiDAR-derived canopy height and intensity metrics
## from a LAS file.
##
## Args:
##   LASfile: path to a .las file, read with readLAS(short = TRUE); the
##            resulting point matrix must expose "Z", "Intensity" and
##            "ReturnNumber" columns.
##   minht:   minimum height (same units as Z) used to filter returns for
##            the height/intensity statistics (default 1.37).
##   above:   height threshold for the "percentage above" cover metrics.
## Returns: a one-row data.frame of point-cloud metrics.
##
## Fixes relative to the previous revision:
##  * argument checks use is.numeric() (class() comparison rejected
##    integer input);
##  * hmode is converted with as.numeric() before comparison with Z
##    (previously a character/numeric comparison was performed);
##  * Imin: round(min(x), digits = 2) -- 'digits = 2' was previously
##    passed into min() via '...' and entered the minimum itself;
##  * hP80 and IP80 now use quantile 0.80 (previously 0.85).
LASmetrics <- function(LASfile, minht = 1.37, above = 2) {
    if (!is.numeric(minht)) {stop("The minht parameter is invalid. It is not a numeric input")}
    if (!is.numeric(above)) {stop("The above parameter is invalid. It is not a numeric input")}
    LASfile <- readLAS(LASfile, short = TRUE)
    MaxZ <- max(LASfile[, "Z"])
    if (minht >= MaxZ) {stop(paste0("The minht parameter is invalid. It must to be less than ", MaxZ))}
    allreturn <- nrow(LASfile)
    ## Point subsets reused throughout the metric computations.
    allreturn_minht <- subset(LASfile, LASfile[, "Z"] > minht)
    firstReturn <- subset(LASfile, LASfile[, "ReturnNumber"] == 1)
    allreturnAbove <- subset(LASfile, LASfile[, "Z"] > above)
    firstReturnAbove <- subset(firstReturn, firstReturn[, "Z"] > above)
    firstabovemean <- subset(firstReturn, firstReturn[, "Z"] > mean(allreturnAbove[, "Z"]))
    firstabovemode <- subset(firstReturn, firstReturn[, "Z"] > as.numeric(names(table(allreturnAbove[, "Z"]))[which.max(table(allreturnAbove[, "Z"]))]))
    allabovemean <- subset(LASfile, LASfile[, "Z"] > mean(allreturnAbove[, "Z"]))
    ## Modal height of the returns above the threshold, as a number so the
    ## comparison below is numeric rather than lexicographic.
    hmode <- as.numeric(names(table(allreturnAbove[, "Z"]))[which.max(table(allreturnAbove[, "Z"]))])
    allabovemode <- subset(LASfile, LASfile[, "Z"] > hmode)
    ## Moment coefficient of skewness (population version).
    "skewness" <-
        function (x, na.rm = FALSE)
    {
        if (is.matrix(x))
            apply(x, 2, skewness, na.rm = na.rm)
        else if (is.vector(x)) {
            if (na.rm) x <- x[!is.na(x)]
            n <- length(x)
            (sum((x - mean(x))^3)/n)/(sum((x - mean(x))^2)/n)^(3/2)
        }
        else if (is.data.frame(x))
            sapply(x, skewness, na.rm = na.rm)
        else skewness(as.vector(x), na.rm = na.rm)
    }
    ## Moment coefficient of kurtosis (population version, not excess).
    "kurtosis" <-
        function (x, na.rm = FALSE)
    {
        if (is.matrix(x))
            apply(x, 2, kurtosis, na.rm = na.rm)
        else if (is.vector(x)) {
            if (na.rm) x <- x[!is.na(x)]
            n <- length(x)
            n*sum( (x-mean(x))^4 )/(sum( (x-mean(x))^2 )^2)
        }
        else if (is.data.frame(x))
            sapply(x, kurtosis, na.rm = na.rm)
        else kurtosis(as.vector(x), na.rm = na.rm)
    }
    metrics <- data.frame(
        Total.all.return.count = nrow(LASfile),
        Total.first.return.count = nrow(firstReturn),
        Total.all.return.count.aboveXX = nrow(allreturn_minht),
        Return.1.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 1)),
        Return.2.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 2)),
        Return.3.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 3)),
        Return.4.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 4)),
        Return.5.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 5)),
        Return.6.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 6)),
        Return.7.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 7)),
        Return.8.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 8)),
        Return.9.count = nrow(subset(allreturn_minht, allreturn_minht[, "ReturnNumber"] == 9)),
        ## Height statistics of returns above minht.
        hmin = round(min(allreturn_minht[, "Z"]), digits = 2),
        hmax = round(max(allreturn_minht[, "Z"]), digits = 2),
        hmean = round(mean(allreturn_minht[, "Z"]), digits = 2),
        hmode = round(as.numeric(names(table(allreturn_minht[, "Z"]))[which.max(table(allreturn_minht[, "Z"]))]), digits = 2),
        hmedian = round(median(allreturn_minht[, "Z"]), digits = 2),
        hsd = round(sd(allreturn_minht[, "Z"]), digits = 2),
        hvar = round(var(allreturn_minht[, "Z"]), digits = 2),
        hcv = round((sd(allreturn_minht[, "Z"])/mean(allreturn_minht[, "Z"])) * 100, digits = 2),
        hkurtosis = round(kurtosis(allreturn_minht[, "Z"]), digits = 2),
        hskewness = round(skewness(allreturn_minht[, "Z"]), digits = 2),
        hP1 = round(quantile(allreturn_minht[, "Z"], 0.01), digits = 2),
        hP5 = round(quantile(allreturn_minht[, "Z"], 0.05), digits = 2),
        hP10 = round(quantile(allreturn_minht[, "Z"], 0.1), digits = 2),
        hP15 = round(quantile(allreturn_minht[, "Z"], 0.15), digits = 2),
        hP20 = round(quantile(allreturn_minht[, "Z"], 0.20), digits = 2),
        hP25 = round(quantile(allreturn_minht[, "Z"], 0.25), digits = 2),
        hP30 = round(quantile(allreturn_minht[, "Z"], 0.30), digits = 2),
        hP35 = round(quantile(allreturn_minht[, "Z"], 0.35), digits = 2),
        hP40 = round(quantile(allreturn_minht[, "Z"], 0.40), digits = 2),
        hP45 = round(quantile(allreturn_minht[, "Z"], 0.45), digits = 2),
        hP50 = round(quantile(allreturn_minht[, "Z"], 0.50), digits = 2),
        hP55 = round(quantile(allreturn_minht[, "Z"], 0.55), digits = 2),
        hP60 = round(quantile(allreturn_minht[, "Z"], 0.60), digits = 2),
        hP65 = round(quantile(allreturn_minht[, "Z"], 0.65), digits = 2),
        hP70 = round(quantile(allreturn_minht[, "Z"], 0.70), digits = 2),
        hP75 = round(quantile(allreturn_minht[, "Z"], 0.75), digits = 2),
        hP80 = round(quantile(allreturn_minht[, "Z"], 0.80), digits = 2),
        hP90 = round(quantile(allreturn_minht[, "Z"], 0.90), digits = 2),
        hP95 = round(quantile(allreturn_minht[, "Z"], 0.95), digits = 2),
        hP99 = round(quantile(allreturn_minht[, "Z"], 0.99), digits = 2),
        Canopy.relief.ratio = ((mean(allreturn_minht[, "Z"]) - min(allreturn_minht[, "Z"]))/(max(allreturn_minht[, "Z"]) - min(allreturn_minht[, "Z"]))),
        ## Intensity statistics of returns above minht.
        Imin = round(min(allreturn_minht[, "Intensity"]), digits = 2),
        Imax = round(max(allreturn_minht[, "Intensity"]), digits = 2),
        Imean = round(mean(allreturn_minht[, "Intensity"]), digits = 2),
        Imode = round(as.numeric(names(table(allreturn_minht[, "Intensity"]))[which.max(table(allreturn_minht[, "Intensity"]))]), digits = 2),
        Imedian = round(median(allreturn_minht[, "Intensity"]), digits = 2),
        Isd = round(sd(allreturn_minht[, "Intensity"]), digits = 2),
        Ivar = round(var(allreturn_minht[, "Intensity"]), digits = 2),
        Icv = round((sd(allreturn_minht[, "Intensity"])/mean(allreturn_minht[, "Intensity"])) * 100, digits = 2),
        Ikurtosis = round(kurtosis(allreturn_minht[, "Intensity"]), digits = 2),
        Iskewness = round(skewness(allreturn_minht[, "Intensity"]), digits = 2),
        IP1 = round(quantile(allreturn_minht[, "Intensity"], 0.01), digits = 2),
        IP5 = round(quantile(allreturn_minht[, "Intensity"], 0.05), digits = 2),
        IP10 = round(quantile(allreturn_minht[, "Intensity"], 0.1), digits = 2),
        IP15 = round(quantile(allreturn_minht[, "Intensity"], 0.15), digits = 2),
        IP20 = round(quantile(allreturn_minht[, "Intensity"], 0.20), digits = 2),
        IP25 = round(quantile(allreturn_minht[, "Intensity"], 0.25), digits = 2),
        IP30 = round(quantile(allreturn_minht[, "Intensity"], 0.30), digits = 2),
        IP35 = round(quantile(allreturn_minht[, "Intensity"], 0.35), digits = 2),
        IP40 = round(quantile(allreturn_minht[, "Intensity"], 0.40), digits = 2),
        IP45 = round(quantile(allreturn_minht[, "Intensity"], 0.45), digits = 2),
        IP50 = round(quantile(allreturn_minht[, "Intensity"], 0.50), digits = 2),
        IP55 = round(quantile(allreturn_minht[, "Intensity"], 0.55), digits = 2),
        IP60 = round(quantile(allreturn_minht[, "Intensity"], 0.60), digits = 2),
        IP65 = round(quantile(allreturn_minht[, "Intensity"], 0.65), digits = 2),
        IP70 = round(quantile(allreturn_minht[, "Intensity"], 0.70), digits = 2),
        IP75 = round(quantile(allreturn_minht[, "Intensity"], 0.75), digits = 2),
        IP80 = round(quantile(allreturn_minht[, "Intensity"], 0.80), digits = 2),
        IP90 = round(quantile(allreturn_minht[, "Intensity"], 0.90), digits = 2),
        IP95 = round(quantile(allreturn_minht[, "Intensity"], 0.95), digits = 2),
        IP99 = round(quantile(allreturn_minht[, "Intensity"], 0.99), digits = 2),
        ## Canopy cover metrics based on the 'above' threshold.
        Pentage.first.returns.Above.XX = (nrow(firstReturnAbove))/(nrow(firstReturn))*100,
        Percentage.all.returns.above.XX = (nrow(allreturnAbove)/allreturn)*100,
        All.returns.above.XX.Total.first.returns.100 = (nrow(allreturnAbove)/nrow(firstReturn))*100,
        First.returns.above.XX = nrow(firstReturnAbove),
        All.returns.above.XX = nrow(allreturnAbove),
        Percentage.first.returns.above.mean = (nrow(firstabovemean)/nrow(firstReturn))*100,
        Percentage.first.returns.above.mode = (nrow(firstabovemode)/nrow(firstReturn))*100,
        Percentage.all.returns.above.mean = (nrow(allabovemean)/allreturn)*100,
        Percentage.all.returns.above.mode = (nrow(allabovemode)/allreturn)*100,
        All.returns.above.mean.Total.first.returns.100 = (nrow(allabovemean)/nrow(firstReturn))*100,
        All.returns.above.mode.Total.first.returns.100 = (nrow(allabovemode)/nrow(firstReturn))*100,
        First.returns.above.mean = nrow(firstabovemean),
        First.returns.above.mode = nrow(firstabovemode),
        All.returns.above.mean = nrow(allabovemean),
        All.returns.above.mode = nrow(allabovemode))
    ## Output column labels kept byte-identical to the previous revision so
    ## downstream consumers keep working.
    colnames(metrics) <- c("Total all return count","Total first return count",paste("Total all return count above", above),paste("Return 1 count above", above),paste("Return 2 count above", above),paste("Return 3 count", above),paste("Return 4 count", above),
                           paste("Return 5 count", above),paste("Return 6 count", above),paste("Return 7 count above", above),paste("Return 8 count above", above),paste("Return 9 count above", above),"hmin",
                           "HMAX","HMEAN", "HMODE","HMEADIAN","HSD","HVAR","HCV","HKUR","HSKE","H01TH",
                           "H05TH","H10TH","H15TH","H20TH","H25TH","H30TH","H35TH","H40TH","H45TH","H50TH","H55TH","H60TH","H65TH","H70TH","H75TH",
                           "H80TH","H90TH","H95TH","H99TH","Canopy.relief.ratio","IMIN","IMAX","IMEAN","IMODE","IMEADIAN","ISD","IVAR","ICV",
                           "IKUR","ISKE","I01TH","I05TH","I10TH","I15TH","I20TH","I25TH","I30TH","I35TH","I40TH","I45TH","I50TH","I55TH","I60TH",
                           "IP65","IP70","IP75","IP80","IP90","IP95","IP99",paste("Pentage first returns Above",above),paste("Percentage all returns above",above),
                           paste("(All returns above",above,"/ Total first returns)*100"),paste("First returns above",above),paste("All returns above",above),"Percentage first returns above mean",
                           "Percentage first returns above mode","Percentage.all.returns.above.mean","Percentage all returns above mode","(All returns above mean / Total first returns)*100",
                           "(All returns above mode / Total first returns)* 100","First returns above mean","First returns above mode","All returns above mean","All returns above mode")
    rownames(metrics) <- NULL
    return(data.frame(metrics))
}
## WPSLiteralData
##
## R6 class modelling a WPS (Web Processing Service) LiteralData element:
## a scalar value plus an XML-schema dataType attribute.  Fix: a stray
## debug 'print(xml)' in decode() has been removed (it dumped the raw XML
## node to the console on every decode).
WPSLiteralData <- R6Class("WPSLiteralData",
  inherit = OGCAbstractObject,
  private = list(
    xmlElement = "LiteralData",
    xmlNamespacePrefix = "WPS"
  ),
  public = list(
    ## The literal value (R value when encoding; coerced value after decode).
    value = NULL,

    ## Build either from an XML node (decoded) or from a raw R value
    ## (encoded).  The WPS service version is folded into the namespace
    ## prefix, e.g. "WPS_1_0_0".
    initialize = function(xml = NULL, value = NULL, serviceVersion = "1.0.0") {
      private$xmlNamespacePrefix = paste(private$xmlNamespacePrefix, gsub("\\.", "_", serviceVersion), sep="_")
      super$initialize(xml = xml, element = private$xmlElement, namespacePrefix = private$xmlNamespacePrefix)
      self$wrap <- TRUE
      if(is.null(xml)){
        ## Map the R type of 'value' onto the corresponding XML-schema
        ## type; anything unrecognised is treated as a string.
        self$attrs$dataType <- switch(class(value),
          "character" = "xs:string",
          "numeric" = "xs:double",
          "integer" = "xs:int",
          "logical" = "xs:boolean",
          "xs:string"
        )
        self$value <- value
        ## XML-schema booleans are lower-case ("true"/"false").
        if(is.logical(value)) self$value <- tolower(as.character(value))
      }else{
        self$decode(xml)
      }
    },

    ## Decode an XML node into this object: read the dataType attribute
    ## (defaulting to xs:string) and coerce the node's text accordingly.
    decode = function(xml){
      dataType <- xmlGetAttr(xml, "dataType")
      if(is.null(dataType)) dataType <- "xs:string"
      self$attrs$dataType <- dataType
      value <- xmlValue(xml)
      self$value <- switch(dataType,
        "xs:string" = value,
        "xs:numeric" = as.numeric(value),
        "xs:double" = as.numeric(value),
        "xs:int" = as.integer(value),
        "xs:integer" = as.integer(value),
        "xs:boolean" = as.logical(value),
        value
      )
    },

    ## Validate this value against a process parameter description: check
    ## the declared data type and, when the description restricts values,
    ## membership in the allowed set.  Stops with an error on failure.
    checkValidity = function(parameterDescription){
      ## NOTE(review): self$attrs$dataType holds "xs:..." values after
      ## initialize/decode, so the R-type cases below never match and the
      ## type check falls through to TRUE -- confirm intent upstream
      ## before tightening.
      valid <- switch(self$attrs$dataType,
        "character" = { parameterDescription$getDataType() == "string" },
        "numeric" = { parameterDescription$getDataType() == "double" },
        "integer" = { parameterDescription$getDataType() == "integer"},
        "logical" = { parameterDescription$getDataType() == "boolean"},
        TRUE
      )
      if(!valid){
        errMsg <- sprintf("WPS Parameter [%s]: Data type '%s' is invalid.",
                          parameterDescription$getIdentifier(), self$attrs$dataType)
        self$ERROR(errMsg)
        stop(errMsg)
      }
      allowedValues <- parameterDescription$getAllowedValues()
      if(length(allowedValues)>0){
        if(!self$value %in% allowedValues){
          errMsg <- sprintf("WPS Parameter [%s]: Value '%s' is invalid. Allowed values are [%s]",
                            parameterDescription$getIdentifier(), self$value,
                            paste0(allowedValues, collapse=", "))
          self$ERROR(errMsg)
          stop(errMsg)
        }
      }
    }
  )
)
# Create a promise: bind `x` in `assign.env` to an expression that is lazily
# evaluated in `eval.env` the first time the value of `x` is requested.
# `substitute(value)` captures the caller's expression unevaluated; the actual
# promise machinery lives in the internal C implementation.
delayedAssign <-
function(x, value, eval.env=parent.frame(1), assign.env=parent.frame(1))
.Internal(delayedAssign(x, substitute(value), eval.env, assign.env))
# Look up the original (pre-processing) form of term `y` by wrapping the
# supplied words `x` in a one-element, zero-frequency stand-in cluster and
# delegating to get_original_term().
get_original_term2 <- function(x, y) {
  # minimal cluster structure expected by get_original_term()
  stand_in_cluster <- list(list(Words = x, Frequencies = 0))
  get_original_term(y, stand_in_cluster)
}
# Simulate replicated count data under a Dail-Madsen (DM) open-population
# model: initial abundance ~ Poisson(lambda), yearly transitions as apparent
# survivors (binomial with phi) plus recruits (Poisson with N*gamma), and
# counts y ~ Binomial(N, p) with imperfect detection.  One site-level
# covariate each acts on lambda, gamma and phi (log/logit links), and an
# observation-level covariate acts on p (logit link).
#
# Returns a list with the inputs, the covariates, the latent states
# (N, S, R), detection probabilities p, the counts y (sites x years x
# surveys), and wide-format copies yy / ccov.p (sites x surveys*years).
simDM <- function(nsites = 50, nsurveys = 3, nyears = 5,
                  mean.lambda = 4, mean.gamma.rel = 0.5,
                  mean.phi = 0.8, mean.p = 0.7,
                  beta.lam = 1, beta.gamma = 1, beta.phi = -1, beta.p = -1,
                  show.plots=TRUE){
  # coerce sizes to scalars, then validate the mean parameters
  nsites <- round(nsites[1])
  nsurveys <- round(nsurveys[1])
  nyears <- round(nyears[1])
  stopifNegative(mean.lambda, allowZero=FALSE)
  stopifnotProbability(mean.gamma.rel)
  stopifnotProbability(mean.phi)
  stopifnotProbability(mean.p)
  # containers: y and p are site x year x survey; S/R hold the nyears-1
  # year-to-year transitions
  y <- p <- array(NA, dim = c(nsites, nyears, nsurveys))
  N <- matrix(NA, nsites, nyears)
  S <- R <- matrix(NA, nsites, nyears-1)
  # covariates drawn uniformly on [-1, 1]
  cov.lam <- runif(nsites, -1, 1)
  cov.gamma <- runif(nsites, -1, 1)
  cov.phi <- runif(nsites, -1, 1)
  cov.p <- array(runif(nsites*nyears*nsurveys, -1, 1), dim = dim(y))
  # year-1 abundance via log-linear covariate model
  lambda <- exp(log(mean.lambda) + beta.lam * cov.lam)
  N[,1] <- rpois(nsites, lambda)
  phi <- plogis(qlogis(mean.phi) + beta.phi * cov.phi)
  gamma <- exp(log(mean.gamma.rel) + beta.gamma * cov.gamma)
  # population dynamics: next year's N = survivors + recruits
  for(t in 1:(nyears-1)) {
    S[,t] <- rbinom(nsites, N[,t], phi)
    R[,t] <- rpois(nsites, N[,(t)]*gamma)
    N[,t+1] <- S[,t] + R[,t]
  }
  # observation process: binomial counts with obs-level detection covariate
  for(i in 1:nsites){
    for(t in 1:nyears){
      for(j in 1:nsurveys){
        p[i,t,j] <- plogis(qlogis(mean.p) + beta.p * cov.p[i,t,j])
        y[i,t,j] <- rbinom(1, N[i,t], p[i,t,j])
      }
    }
  }
  # flatten year/survey dimensions into a wide matrix (surveys nested in years)
  yy <- ccov.p <- array(NA, dim = c(nsites, nsurveys*nyears))
  for(t in 1:nyears){
    yy[,(nsurveys * t-(nsurveys-1)):(nsurveys*t)] <- y[,t,]
    ccov.p[,(nsurveys * t-(nsurveys-1)):(nsurveys*t)] <- cov.p[,t,]
  }
  if(show.plots) {
    # diagnostic plots; par() is restored on exit and plotting failures are
    # caught so the simulated data are still returned
    op <- par(mfrow = c(3, 2), mar = c(5,5,4,3), cex.lab = 1.5, cex.axis = 1.5)
    on.exit(par(op))
    tryPlot <- try( {
      matplot(t(N), type = 'l',
              main = paste('Population trajectories under a simple DM model \nwith mean lambda =',
                           mean.lambda, ', mean gamma =', mean.gamma.rel, ' and mean phi =', mean.phi, ''),
              lty = 1, lwd = 3, las = 1, frame = FALSE, xlab = 'Year', ylab = 'N')
      matplot(t(S), type = 'l', main = 'Number of apparent survivors', lty = 1, lwd = 3, las = 1,
              frame = FALSE, xlab = 'Year', ylab = 'Survivors (S)')
      matplot(t(R), type = 'l', main = 'Number of recruits', lty = 1, lwd = 3, las = 1,
              frame = FALSE, xlab = 'Year', ylab = 'Recruits (R)')
      matplot(t(apply(p, c(1,2), mean)), type = 'l',
              main = 'Average detection probability per site and year', lty = 1, lwd = 3, las = 1,
              frame = FALSE, xlab = 'Year', ylab = 'Average p')
      hist(N[,1], main = 'Distribution of N in first year', breaks = 50, col = 'grey')
      hist(N[,nyears], main = 'Distribution of N in last year', breaks = 50, col = 'grey')
    }, silent = TRUE)
    if(inherits(tryPlot, "try-error"))
      tryPlotError(tryPlot)
  }
  return(list(
    nsites = nsites, nsurveys = nsurveys, nyears = nyears, mean.lambda = mean.lambda,
    mean.gamma.rel = mean.gamma.rel, mean.phi = mean.phi, mean.p = mean.p,
    beta.lam = beta.lam, beta.gamma = beta.gamma, beta.phi = beta.phi, beta.p = beta.p,
    cov.lam = cov.lam, cov.gamma = cov.gamma, cov.phi = cov.phi,
    cov.p = cov.p,
    ccov.p = ccov.p,
    N = N,
    S = S, R = R,
    p = p,
    y = y,
    yy = yy))
}
# Prior cumulative distribution function of the measure of effect `what`
# ("logor", "or", "rrisk", or "arisk") under the prior given by `prior_par`,
# optionally restricted to a one-sided hypothesis.
#
# q          : quantile(s) at which the prior CDF is evaluated
# prior_par  : named list with mu_psi, sigma_psi, mu_beta, sigma_beta
# what       : scale of the measure of effect
# hypothesis : "H1" (two-sided), "H+", or "H-"
# Returns the prior CDF at q, delegating to plogor/por/prrisk/parisk.
pprior <- function(q,
                   prior_par = list(mu_psi = 0, sigma_psi = 1,
                                    mu_beta = 0, sigma_beta = 1),
                   what = "logor",
                   hypothesis = "H1") {
  # --- argument validation (checks and messages preserved verbatim) ---
  if ( ! is.list(prior_par) ||
       ! all(c("mu_psi", "sigma_psi", "mu_beta", "sigma_beta") %in%
             names(prior_par))) {
    stop('prior_par needs to be a named list with elements
         "mu_psi", "sigma_psi", "mu_beta", and "sigma_beta',
         call. = FALSE)
  }
  if (prior_par$sigma_psi <= 0 || prior_par$sigma_beta <= 0) {
    stop('sigma_psi and sigma_beta need to be larger than 0',
         call. = FALSE)
  }
  if ( ! (what %in% c("logor", "or", "rrisk", "arisk"))) {
    stop('what needs to be either "logor", "or", "rrisk", or "arisk"',
         call. = FALSE)
  }
  if ( ! (hypothesis %in% c("H1", "H+", "H-"))) {
    stop('hypothesis needs to be either "H1", "H+", or "H-"',
         call. = FALSE)
  }
  # --- dispatch to the matching p<what>() implementation ---
  cdf_args <- list(q = q,
                   mu_psi = prior_par[["mu_psi"]],
                   sigma_psi = prior_par[["sigma_psi"]],
                   hypothesis = hypothesis)
  if (what %in% c("rrisk", "arisk")) {
    # risk scales additionally depend on the baseline (beta) prior
    cdf_args[["mu_beta"]] <- prior_par[["mu_beta"]]
    cdf_args[["sigma_beta"]] <- prior_par[["sigma_beta"]]
  }
  do.call(paste0("p", what), cdf_args)
}
# S4 container for the result of ProbAccept(): the acceptance probability
# (exact or simulated) plus a list of inputs and simulation by-products.
setClass("ProbAcceptance", slots =
           list(probabilities = "numeric",
                extra = "list"))
# Probability of accepting a device under the AAMI SP10:2006 criteria.
#
# n          : sample size per (simulated) study
# mu, sd     : mean(s) and standard deviation of the measurement model
# ptolerror  : probability of a tolerable error, must lie in (0, 1)
# distribution, criteria : currently only "normal" / "SP10:2006" supported
# simulate   : if TRUE, also estimate the acceptance probability by simulation
# sim.count  : number of simulated samples (non-negative integer)
# noshow     : suppress the message reporting the exact result
# Returns a "ProbAcceptance" S4 object; @probabilities carries the exact
# probability (or the simulated estimate when simulate = TRUE) and @extra
# the inputs plus simulation by-products (incl. $exactProb in both cases).
ProbAccept <- function(n, mu, sd, ptolerror = 0.85,
                       distribution = "normal",
                       criteria = "SP10:2006",
                       simulate = FALSE, sim.count = 1e4,
                       noshow = FALSE) {
  if (!is.logical(simulate))
    stop("Wrong input for 'simulate'.")
  # match.arg() errors on anything but the single supported option
  check_crit <- match.arg(criteria, c("SP10:2006"))
  check_in <- match.arg(distribution, c("normal"))
  # FIX: the original condition used assignment, `!(sim.count = sim.count)`,
  # which is always truthy for positive counts, so the check never fired.
  # Reject negative and non-integer simulation counts instead.
  if ((sim.count < 0) || (sim.count != round(sim.count)))
    stop("Bad input for 'sim.count'.")
  if (mu >= rootinv(0.0001, ptolerror = ptolerror))
    stop("'sigmaAAMI' not defined for mu > ",
         round(rootinv(0.0001, ptolerror = ptolerror), 3),
         ", subject to ptolerror = ", ptolerror, ".")
  if (sd <= 0)
    stop("Wrong input for 'sd'.")
  if ( (ptolerror <=0 ) || (ptolerror >=1))
    stop("Wrong input for 'ptolerror'.")
  # exact acceptance probability by numerical integration
  exProb <- integrate(innerIntegral3, lower = 0.0000, upper = root(0),
                      mu = mu, std.dev= sd, n = n, ptolerror = ptolerror)$value
  if (simulate) {
    # FIX: recycle sd across mu so std.dev[i] is defined for every mu[i]
    # (previously a scalar sd yielded NA for i > 1 when mu was a vector)
    std.dev <- rep(sd, length.out = length(mu))
    pass.rates <- sim.mu <- sim.sd <- NULL
    lower.limit <- -10
    upper.limit <- 10
    for(i in seq_along(mu)){
      for (j in seq_len(sim.count)){
        data <- rnorm(n = n, mean = mu[i], sd = std.dev[i])
        # FIX: elementwise `&` (scalar `&&` on vectors errors in R >= 4.3
        # and before that only inspected the first observation)
        pass.rate <- sum(data >= lower.limit & data <= upper.limit) / n
        pass.rates <- append(pass.rates, pass.rate)
        sim.mu <- append(sim.mu, mean(data))
        # sd(data) resolves to stats::sd despite the numeric `sd` argument:
        # R skips non-function bindings when looking up a call target
        sim.sd <- append(sim.sd, sd(data))
      }
    }
    # NOTE(review): this toggle is hard-wired, so the vectorized mapply()
    # branch is always taken; kept for interface stability
    atest <- TRUE
    if (atest) {
      sim.sd.aami = mapply(root,sim.mu, ptolerror)
    } else {
      sim.sd.aami = root(mu = mu)
    }
    # a simulated sample "passes" when its SD is within the AAMI limit
    audit.df <- data.frame(Mean=sim.mu, StdDev=sim.sd, StdDevaami=sim.sd.aami)
    audit.df['Pass'] = ifelse (audit.df$StdDevaami >= audit.df$StdDev, 1, 0)
    audit.check = round(sum(audit.df['Pass'], na.rm = TRUE) /
                          sum(!is.na(audit.df['Pass'])), 3)
    out <- new("ProbAcceptance",
               probabilities = audit.check,
               extra = list(sim.sd = sim.sd,
                            sim.sd.aami = sim.sd.aami,
                            simulate = simulate,
                            n = n,
                            noshow = noshow,
                            ptolerror = ptolerror,
                            sim.count = sim.count,
                            mu = mu, std.dev = sd))
  } else {
    out <- new("ProbAcceptance",
               probabilities = exProb,
               extra = list(sim.sd = NULL,
                            sim.sd.aami = NULL,
                            simulate = simulate,
                            n = n,
                            noshow = noshow,
                            sim.count = sim.count,
                            mu = mu, std.dev = sd))
  }
  # the exact probability is always recorded, even in simulation mode
  out@extra[["exactProb"]] <- exProb
  if (!noshow) {
    message("\n
    --- Exact results ---
    \n
    The exact probability of accepting the device is ", exProb,
    ".
    \n")
  }
  out
}
# show() method for ProbAcceptance: prints a human-readable summary of the
# simulation results.  When the object was built without simulation there is
# nothing extra to report (the exact probability message is emitted by
# ProbAccept() itself).
setMethod("show", signature = signature(object = "ProbAcceptance"),
          function(object) {
            if (object@extra$simulate) {
              cat("\n")
              cat("\n")
              cat(" --- Simulation results ---")
              cat("\n")
              cat("Based on n =", object@extra$sim.count,
                  "samples from a normal distribution, the probability"
                  ,"of accepting the device is",
                  round(object@probabilities, 5),".")
              cat("\n")
              cat("Input: sample size (i.e., 'n') = ", object@extra$n, ";",
                  "'mu' = ", object@extra$mu, " ; 'sd' = ",
                  object@extra$std.dev,
                  " ; ptolerror =", object@extra$ptolerror, ".")
              cat("\n")
              cat("\n")
              cat("** Run plot() for this object to get the scatter of the simulated samples.")
              cat("\n")
            } else {
              cat("\n")
              cat(">>> No simulations to run.")
              cat("\n")
            }
          })
# plot() method for ProbAcceptance: scatter of simulated sample SDs against
# their AAMI SP10 limits, drawn over the triangular acceptance region.
# Requires the object to have been created with simulate = TRUE.
setMethod("plot", signature = signature(x ="ProbAcceptance", y = "ANY"),
          function(x, y, ...) {
            if (x@extra$simulate) {
              sim.sd <- x@extra$sim.sd
              sim.sd.aami <- x@extra$sim.sd.aami
              # triangle (0,0)-(10,10)-(10,0): region where sample SD <= limit
              xx <- c(0, 2*(1:5), 2*(5:1), 0)
              yy <- c(0, 2*(1:5), 0, 0, 0, 0, 0 ,0)
              plot(xx, yy, type = "l",
                   ylab = "SD-AAMI SP10", xlab = "Sample standard deviations",
                   main = "Scatter SD-AAMI SP10 vs. Sample SDs.", ...)
              # FIX: the fill-colour literal here was truncated in this source
              # (likely a "#RRGGBB" hex string lost to comment stripping),
              # leaving an unterminated string.  "lightblue" is a stand-in;
              # TODO confirm the original colour.
              polygon(xx, yy, col = "lightblue")
              points(sim.sd.aami, sim.sd, ylim = c(0, 10), xlim = c(0, 10),
                     pch = 19, cex = 0.65)
              abline(a = 0, b =1, lwd = 2, col = "Red")
              text(x = 7, y = 2, paste("ACCEPTANCE REGION ( mu = ",
                                       x@extra$mu, ")."))
              text(x = 7, y = 1, paste("Prob. accepting device (simulated) =",
                                       x@probabilities))
              text(x = 2, y = 8, paste("Prob. tolerable error = ",
                                       x@extra$ptolerror))
            } else {
              cat("\n")
              cat("No plot to show. Set 'simulate = TRUE' to get simulated results.")
              cat("\n")
            }
          })
# ---- knitr setup -----------------------------------------------------------
# FIX: the `comment` option string was truncated in this source (unterminated
# quote); "#>" is the standard knitr/vignette output prefix and restores a
# syntactically valid call.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
# ---- read a multi-frame (animated) TIFF ------------------------------------
path_dancing_banana <- system.file("img", "Rlogo-banana.tif",
                                   package = "ijtiff")
print(path_dancing_banana)
pacman::p_load(ijtiff, magrittr)
img_dancing_banana <- read_tif(path_dancing_banana)
print(img_dancing_banana)
# lay each channel's frames side by side, then stack the three channel strips
# vertically into one displayable array
d <- dim(img_dancing_banana)
reds <- purrr::map(seq_len(d[4]), ~ img_dancing_banana[, , 1, .]) %>%
  purrr::reduce(cbind)
greens <- purrr::map(seq_len(d[4]), ~ img_dancing_banana[, , 2, .]) %>%
  purrr::reduce(cbind)
blues <- purrr::map(seq_len(d[4]), ~ img_dancing_banana[, , 3, .]) %>%
  purrr::reduce(cbind)
to_display <- array(0, dim = c(3 * nrow(reds), ncol(reds), 3, 1))
to_display[seq_len(nrow(reds)), , 1, ] <- reds
to_display[seq_len(nrow(reds)) + nrow(reds), , 2, ] <- greens
to_display[seq_len(nrow(reds)) + 2 * nrow(reds), , 3, ] <- blues
display(to_display)
# ---- read only selected frames ---------------------------------------------
img_dancing_banana357 <- read_tif(path_dancing_banana, frames = c(3, 5, 7))
d <- dim(img_dancing_banana357)
reds <- purrr::map(seq_len(d[4]), ~ img_dancing_banana357[, , 1, .]) %>%
  purrr::reduce(cbind)
greens <- purrr::map(seq_len(d[4]), ~ img_dancing_banana357[, , 2, .]) %>%
  purrr::reduce(cbind)
blues <- purrr::map(seq_len(d[4]), ~ img_dancing_banana357[, , 3, .]) %>%
  purrr::reduce(cbind)
to_display <- array(0, dim = c(3 * nrow(reds), ncol(reds), 3, 1))
to_display[seq_len(nrow(reds)), , 1, ] <- reds
to_display[seq_len(nrow(reds)) + nrow(reds), , 2, ] <- greens
to_display[seq_len(nrow(reds)) + 2 * nrow(reds), , 3, ] <- blues
display(to_display)
# ---- single-frame colour and greyscale images ------------------------------
path_rlogo <- system.file("img", "Rlogo.tif", package = "ijtiff")
img_rlogo <- read_tif(path_rlogo)
dim(img_rlogo)
class(img_rlogo)
display(img_rlogo)
path_rlogo_grey <- system.file("img", "Rlogo-grey.tif", package = "ijtiff")
img_rlogo_grey <- read_tif(path_rlogo_grey)
dim(img_rlogo_grey)
display(img_rlogo_grey)
# ---- write TIFFs and ImageJ text images ------------------------------------
path <- tempfile(pattern = "dancing-banana", fileext = ".tif")
print(path)
write_tif(img_dancing_banana, path)
path_txt_img <- system.file("img", "Rlogo-grey.txt", package = "ijtiff")
txt_img <- read_txt_img(path_txt_img)
write_txt_img(txt_img, path = tempfile(pattern = "txtimg", fileext = ".txt"))
# Antibiotic class selector: returns the antibiotic columns of the current
# data whose class matches the user-supplied name (regex-matched against the
# antibiotics reference data).  Thin wrapper around ab_select_exec() with
# ab_class_args set.
ab_class <- function(ab_class,
                     only_rsi_columns = FALSE,
                     only_treatable = TRUE,
                     ...) {
  # validate arguments before dispatching
  meet_criteria(ab_class, allow_class = "character", has_length = 1, allow_NULL = TRUE)
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  meet_criteria(only_treatable, allow_class = "logical", has_length = 1)
  ab_select_exec(NULL, only_rsi_columns = only_rsi_columns, ab_class_args = ab_class, only_treatable = only_treatable)
}
# Antibiotic selector taking a free-form filter expression, evaluated with
# non-standard evaluation inside the AMR::antibiotics reference data.
# Returns the columns of the current data whose antibiotic codes satisfy the
# filter, classed as "ab_selector".
ab_selector <- function(filter,
                        only_rsi_columns = FALSE,
                        only_treatable = TRUE,
                        ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  meet_criteria(only_treatable, allow_class = "logical", has_length = 1)
  # data frame this selector is being evaluated on (tidyselect-like context)
  vars_df <- get_current_data(arg_name = NA, call = -2)
  ab_in_data <- get_column_abx(vars_df, info = FALSE, only_rsi_columns = only_rsi_columns,
                               sort = FALSE, fn = "ab_selector")
  # capture the unevaluated filter and evaluate it within the reference data
  call <- substitute(filter)
  agents <- tryCatch(AMR::antibiotics[which(eval(call, envir = AMR::antibiotics)), "ab", drop = TRUE],
                     error = function(e) stop_(e$message, call = -5))
  # keep only agents actually present as columns, preserving column order
  agents <- ab_in_data[ab_in_data %in% agents]
  message_agent_names(function_name = "ab_selector",
                      agents = agents,
                      ab_group = NULL,
                      examples = "",
                      call = call)
  structure(unname(agents),
            class = c("ab_selector", "character"))
}
# Antibiotic-group selector family.  Each function validates its arguments
# and delegates to ab_select_exec() with the group name matching the function
# name.  Groups whose members include agents that are not used therapeutically
# additionally expose 'only_treatable' to drop those agents.  All return an
# "ab_selector" character vector of matching column names in the current
# data context.
aminoglycosides <- function(only_rsi_columns = FALSE, only_treatable = TRUE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  meet_criteria(only_treatable, allow_class = "logical", has_length = 1)
  ab_select_exec("aminoglycosides", only_rsi_columns = only_rsi_columns, only_treatable = only_treatable)
}
aminopenicillins <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("aminopenicillins", only_rsi_columns = only_rsi_columns)
}
antifungals <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("antifungals", only_rsi_columns = only_rsi_columns)
}
antimycobacterials <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("antimycobacterials", only_rsi_columns = only_rsi_columns)
}
betalactams <- function(only_rsi_columns = FALSE, only_treatable = TRUE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  meet_criteria(only_treatable, allow_class = "logical", has_length = 1)
  ab_select_exec("betalactams", only_rsi_columns = only_rsi_columns, only_treatable = only_treatable)
}
carbapenems <- function(only_rsi_columns = FALSE, only_treatable = TRUE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  meet_criteria(only_treatable, allow_class = "logical", has_length = 1)
  ab_select_exec("carbapenems", only_rsi_columns = only_rsi_columns, only_treatable = only_treatable)
}
# cephalosporins, in total and per generation
cephalosporins <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("cephalosporins", only_rsi_columns = only_rsi_columns)
}
cephalosporins_1st <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("cephalosporins_1st", only_rsi_columns = only_rsi_columns)
}
cephalosporins_2nd <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("cephalosporins_2nd", only_rsi_columns = only_rsi_columns)
}
cephalosporins_3rd <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("cephalosporins_3rd", only_rsi_columns = only_rsi_columns)
}
cephalosporins_4th <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("cephalosporins_4th", only_rsi_columns = only_rsi_columns)
}
cephalosporins_5th <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("cephalosporins_5th", only_rsi_columns = only_rsi_columns)
}
fluoroquinolones <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("fluoroquinolones", only_rsi_columns = only_rsi_columns)
}
glycopeptides <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("glycopeptides", only_rsi_columns = only_rsi_columns)
}
lincosamides <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("lincosamides", only_rsi_columns = only_rsi_columns)
}
lipoglycopeptides <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("lipoglycopeptides", only_rsi_columns = only_rsi_columns)
}
macrolides <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("macrolides", only_rsi_columns = only_rsi_columns)
}
oxazolidinones <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("oxazolidinones", only_rsi_columns = only_rsi_columns)
}
penicillins <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("penicillins", only_rsi_columns = only_rsi_columns)
}
polymyxins <- function(only_rsi_columns = FALSE, only_treatable = TRUE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  meet_criteria(only_treatable, allow_class = "logical", has_length = 1)
  ab_select_exec("polymyxins", only_rsi_columns = only_rsi_columns, only_treatable = only_treatable)
}
streptogramins <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("streptogramins", only_rsi_columns = only_rsi_columns)
}
quinolones <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("quinolones", only_rsi_columns = only_rsi_columns)
}
tetracyclines <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("tetracyclines", only_rsi_columns = only_rsi_columns)
}
trimethoprims <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("trimethoprims", only_rsi_columns = only_rsi_columns)
}
ureidopenicillins <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  ab_select_exec("ureidopenicillins", only_rsi_columns = only_rsi_columns)
}
# Selector for agents that can be administered orally (i.e. have a defined
# oral DDD in the antibiotics reference data).  Returns the matching columns
# of the current data as an "ab_selector".
administrable_per_os <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  vars_df <- get_current_data(arg_name = NA, call = -2)
  ab_in_data <- get_column_abx(vars_df, info = FALSE, only_rsi_columns = only_rsi_columns,
                               sort = FALSE, fn = "administrable_per_os")
  # all orally administrable agents (used for example text) vs those present
  agents_all <- antibiotics[which(!is.na(antibiotics$oral_ddd)), "ab", drop = TRUE]
  agents <- antibiotics[which(antibiotics$ab %in% ab_in_data & !is.na(antibiotics$oral_ddd)), "ab", drop = TRUE]
  agents <- ab_in_data[ab_in_data %in% agents]
  message_agent_names(function_name = "administrable_per_os",
                      agents = agents,
                      ab_group = "administrable_per_os",
                      examples = paste0(" (such as ",
                                        vector_or(ab_name(sample(agents_all,
                                                                 size = min(5, length(agents_all)),
                                                                 replace = FALSE),
                                                          tolower = TRUE,
                                                          language = NULL),
                                                  quotes = FALSE),
                                        ")"))
  structure(unname(agents),
            class = c("ab_selector", "character"))
}
# Selector for agents that can be administered intravenously (i.e. have a
# defined IV DDD in the antibiotics reference data).  Returns the matching
# columns of the current data as an "ab_selector".
administrable_iv <- function(only_rsi_columns = FALSE, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  vars_df <- get_current_data(arg_name = NA, call = -2)
  ab_in_data <- get_column_abx(vars_df, info = FALSE, only_rsi_columns = only_rsi_columns,
                               sort = FALSE, fn = "administrable_iv")
  # agents_all is computed but only the present agents are returned
  agents_all <- antibiotics[which(!is.na(antibiotics$iv_ddd)), "ab", drop = TRUE]
  agents <- antibiotics[which(antibiotics$ab %in% ab_in_data & !is.na(antibiotics$iv_ddd)), "ab", drop = TRUE]
  agents <- ab_in_data[ab_in_data %in% agents]
  message_agent_names(function_name = "administrable_iv",
                      agents = agents,
                      ab_group = "administrable_iv",
                      examples = "")
  structure(unname(agents),
            class = c("ab_selector", "character"))
}
# Selector for agents to which the data's microorganisms are NOT intrinsically
# resistant, according to the EUCAST expert rules of the given version.
# Columns that would be entirely "R" after applying the rules are reported
# (once per unique set) and excluded from the selection.
not_intrinsic_resistant <- function(only_rsi_columns = FALSE, col_mo = NULL, version_expertrules = 3.3, ...) {
  meet_criteria(only_rsi_columns, allow_class = "logical", has_length = 1)
  vars_df <- get_current_data(arg_name = NA, call = -2)
  ab_in_data <- get_column_abx(vars_df, info = FALSE, only_rsi_columns = only_rsi_columns,
                               sort = FALSE, fn = "not_intrinsic_resistant")
  # per column: TRUE when, after applying the EUCAST expert rules, the whole
  # column is "R" (i.e. intrinsic resistance for every isolate)
  vars_df_R <- tryCatch(sapply(eucast_rules(vars_df,
                                            col_mo = col_mo,
                                            version_expertrules = version_expertrules,
                                            rules = "expert",
                                            info = FALSE),
                               function(col) tryCatch(!any(is.na(col)) && all(col == "R"),
                                                      error = function(e) FALSE)),
                        error = function(e) stop_("in not_intrinsic_resistant(): ", e$message, call = FALSE))
  agents <- ab_in_data[ab_in_data %in% names(vars_df_R[which(vars_df_R)])]
  if (length(agents) > 0 &&
      message_not_thrown_before("not_intrinsic_resistant", sort(agents))) {
    # pretty-print the removed columns, appending full drug names where the
    # column name alone is not self-explanatory
    agents_formatted <- paste0("'", font_bold(agents, collapse = NULL), "'")
    agents_names <- ab_name(names(agents), tolower = TRUE, language = NULL)
    need_name <- generalise_antibiotic_name(agents) != generalise_antibiotic_name(agents_names)
    agents_formatted[need_name] <- paste0(agents_formatted[need_name], " (", agents_names[need_name], ")")
    message_("For `not_intrinsic_resistant()` removing ",
             ifelse(length(agents) == 1, "column ", "columns "),
             vector_and(agents_formatted, quotes = FALSE, sort = FALSE))
  }
  # keep only the columns that are NOT fully resistant
  vars_df_R <- names(vars_df_R)[which(!vars_df_R)]
  out <- unname(intersect(ab_in_data, vars_df_R))
  structure(out,
            class = c("ab_selector", "character"))
}
# Shared backend for the antibiotic group selectors.  Resolves the antibiotic
# columns present in the current data context, optionally drops agents that
# cannot be used therapeutically, picks the codes belonging to the requested
# group (a precompiled AB_* vector for named selectors, or a regex match for
# ab_class()), messages which columns were selected, and returns them as an
# "ab_selector" character vector (or NULL when the data has no such columns).
ab_select_exec <- function(function_name,
                           only_rsi_columns = FALSE,
                           only_treatable = FALSE,
                           ab_class_args = NULL) {
  vars_df <- get_current_data(arg_name = NA, call = -3)
  ab_in_data <- get_column_abx(vars_df, info = FALSE, only_rsi_columns = only_rsi_columns,
                               sort = FALSE, fn = function_name)
  if (only_treatable == TRUE) {
    # agents that only exist for laboratory purposes (screening, markers,
    # high-level variants) and are never used for treatment
    untreatable <- antibiotics[which(antibiotics$name %like% "-high|EDTA|polysorbate|macromethod|screening|/nacubactam"), "ab", drop = TRUE]
    if (any(untreatable %in% names(ab_in_data))) {
      # warn once per session, then silently drop these columns
      if (message_not_thrown_before(function_name, "ab_class", "untreatable", entire_session = TRUE)) {
        warning_("Some agents in `", function_name, "()` were ignored since they cannot be used for treating patients: ",
                 vector_and(ab_name(names(ab_in_data)[names(ab_in_data) %in% untreatable],
                                    language = NULL,
                                    tolower = TRUE),
                            quotes = FALSE,
                            sort = TRUE), ". They can be included using `", function_name, "(only_treatable = FALSE)`. ",
                 "This warning will be shown once per session.",
                 call = FALSE)
      }
      ab_in_data <- ab_in_data[!names(ab_in_data) %in% untreatable]
    }
  }
  if (length(ab_in_data) == 0) {
    message_("No antimicrobial agents found in the data.")
    return(NULL)
  }
  if (is.null(ab_class_args)) {
    # named selector: look up the precompiled AB_<GROUP> code vector
    abx <- get(paste0("AB_", toupper(function_name)), envir = asNamespace("AMR"))
    ab_group <- function_name
    examples <- paste0(" (such as ", vector_or(ab_name(sample(abx, size = min(2, length(abx)), replace = FALSE),
                                                       tolower = TRUE,
                                                       language = NULL),
                                               quotes = FALSE), ")")
  } else {
    # ab_class(): regex match against the group / ATC group columns
    abx <- subset(AB_lookup,
                  group %like% ab_class_args |
                    atc_group1 %like% ab_class_args |
                    atc_group2 %like% ab_class_args)$ab
    ab_group <- find_ab_group(ab_class_args)
    function_name <- "ab_class"
    examples <- paste0(" (such as ", find_ab_names(ab_class_args, 2), ")")
  }
  agents <- ab_in_data[names(ab_in_data) %in% abx]
  message_agent_names(function_name = function_name,
                      agents = agents,
                      ab_group = ab_group,
                      examples = examples,
                      ab_class_args = ab_class_args)
  structure(unname(agents),
            class = c("ab_selector", "character"))
}
# Combine ab_selector vectors (and/or plain character selections) into a
# single "ab_selector" character vector, preserving element order.
c.ab_selector <- function(...) {
  pieces <- lapply(list(...), as.character)
  structure(unlist(pieces),
            class = c("ab_selector", "character"))
}
# Backend for all()/any() on ab_selector comparisons.  `...` contains the
# selected column names mixed in with the comparison value(s) ("R"/"S"/"I");
# those values are separated out, and each row of the selected columns is
# reduced with all() or any() membership in the values.  Returns a logical
# vector with one element per row of the current data.
all_any_ab_selector <- function(type, ..., na.rm = TRUE) {
  cols_ab <- c(...)
  # the R/S/I tokens among the inputs are the values to compare against
  result <- cols_ab[toupper(cols_ab) %in% c("R", "S", "I")]
  if (length(result) == 0) {
    message_("Filtering ", type, " of columns ", vector_and(font_bold(cols_ab, collapse = NULL), quotes = "'"), ' to contain value "R", "S" or "I"')
    result <- c("R", "S", "I")
  }
  # whatever remains are the column names to filter on
  cols_ab <- cols_ab[!cols_ab %in% result]
  df <- get_current_data(arg_name = NA, call = -3)
  if (type == "all") {
    scope_fn <- all
  } else {
    scope_fn <- any
  }
  # row-wise evaluation: transpose so each list element holds one row's values
  x_transposed <- as.list(as.data.frame(t(df[, cols_ab, drop = FALSE]), stringsAsFactors = FALSE))
  vapply(FUN.VALUE = logical(1),
         X = x_transposed,
         FUN = function(y) scope_fn(y %in% result, na.rm = na.rm),
         USE.NAMES = FALSE)
}
# S3 all()/any() methods for ab_selector objects: delegate to the shared
# backend with the matching row-wise reduction type.
all.ab_selector <- function(..., na.rm = FALSE) {
  all_any_ab_selector("all", ..., na.rm = na.rm)
}
any.ab_selector <- function(..., na.rm = FALSE) {
  all_any_ab_selector("any", ..., na.rm = na.rm)
}
# S3 all() method for "ab_selector_any_all" results: combine the inputs,
# strip the class, and optionally drop NAs.
all.ab_selector_any_all <- function(..., na.rm = FALSE) {
  combined <- unclass(c(...))
  if (na.rm == TRUE) {
    return(combined[!is.na(combined)])
  }
  combined
}
# S3 any() method for "ab_selector_any_all" results; identical behaviour to
# the all() method above.
any.ab_selector_any_all <- function(..., na.rm = FALSE) {
  combined <- unclass(c(...))
  if (na.rm == TRUE) {
    return(combined[!is.na(combined)])
  }
  combined
}
# `==` on an ab_selector: interpreted as a row-wise filter where the selected
# columns equal e2.  Whether all() or any() semantics apply is inferred by
# inspecting the call stack for a wrapping all()/any(); when neither is
# found, "all" is assumed and a note is shown for multi-column selections.
`==.ab_selector` <- function(e1, e2) {
  calls <- as.character(match.call())
  fn_name <- calls[2]
  # strip a wrapping c(...) from the deparsed first argument
  fn_name <- gsub("^(c\\()(.*)(\\))$", "\\2", fn_name)
  if (is_any(fn_name)) {
    type <- "any"
  } else if (is_all(fn_name)) {
    type <- "all"
  } else {
    type <- "all"
    if (length(e1) > 1) {
      message_("Assuming a filter on ", type, " ", length(e1), " ", gsub("[\\(\\)]", "", fn_name),
               ". Wrap around `all()` or `any()` to prevent this note.")
    }
  }
  structure(all_any_ab_selector(type = type, e1, e2),
            class = c("ab_selector_any_all", "logical"))
}
# `!=` on an ab_selector: same call-stack inspection as `==`, but the
# comparison set is complemented within R/S/I before filtering.
`!=.ab_selector` <- function(e1, e2) {
  calls <- as.character(match.call())
  fn_name <- calls[2]
  fn_name <- gsub("^(c\\()(.*)(\\))$", "\\2", fn_name)
  if (is_any(fn_name)) {
    type <- "any"
  } else if (is_all(fn_name)) {
    type <- "all"
  } else {
    type <- "all"
    if (length(e1) > 1) {
      message_("Assuming a filter on ", type, " ", length(e1), " ", gsub("[\\(\\)]", "", fn_name),
               ". Wrap around `all()` or `any()` to prevent this note.")
    }
  }
  # invert: keep the interpretation values other than e2
  rsi <- c("R", "S", "I")
  e2 <- rsi[rsi != e2]
  structure(all_any_ab_selector(type = type, e1, e2),
            class = c("ab_selector_any_all", "logical"))
}
# Intersection of two selections: columns matched by BOTH selectors.
`&.ab_selector` <- function(e1, e2) {
  common <- intersect(unclass(e1), unclass(e2))
  structure(common, class = c("ab_selector", "character"))
}
# Union of two selections: columns matched by EITHER selector.
`|.ab_selector` <- function(e1, e2) {
  either <- union(unclass(e1), unclass(e2))
  structure(either, class = c("ab_selector", "character"))
}
# Heuristic: returns TRUE when the current call stack text shows the
# first operand wrapped in any(), e.g. any(carbapenems() == "R").
is_any <- function(el1) {
  # flatten the whole call stack into a single searchable string
  call_txt <- paste0(trimws(deparse(sys.calls())), collapse = " ")
  # keep only the first comma-separated part of the operand text
  first_arg <- gsub("(.*),.*", "\\1", el1)
  call_txt %like% paste0("[^_a-zA-Z0-9]any\\(", "(c\\()?", first_arg)
}
# Heuristic: returns TRUE when the current call stack text shows the
# first operand wrapped in all(), e.g. all(carbapenems() == "R").
is_all <- function(el1) {
  # flatten the whole call stack into a single searchable string
  call_txt <- paste0(trimws(deparse(sys.calls())), collapse = " ")
  # keep only the first comma-separated part of the operand text
  first_arg <- gsub("(.*),.*", "\\1", el1)
  call_txt %like% paste0("[^_a-zA-Z0-9]all\\(", "(c\\()?", first_arg)
}
# Return a lowercase, slash-separated description of the antibiotic
# group(s) in the internal AB_lookup table whose 'group', 'atc_group1'
# or 'atc_group2' matches ab_class_args (treated as a loose pattern).
find_ab_group <- function(ab_class_args) {
  # any non-alphanumeric character in the search term acts as a wildcard
  ab_class_args <- gsub("[^a-zA-Z0-9]", ".*", ab_class_args)
  AB_lookup %pm>%
    subset(group %like% ab_class_args |
             atc_group1 %like% ab_class_args |
             atc_group2 %like% ab_class_args) %pm>%
    pm_pull(group) %pm>%
    unique() %pm>%
    tolower() %pm>%
    sort() %pm>%
    paste(collapse = "/")
}
# Return up to n example drug names (lowercase, "or"-joined) from the
# antibiotics table matching ab_group, for use in user messages.
find_ab_names <- function(ab_group, n = 3) {
  # non-alphanumerics (except '|') act as wildcards in the pattern
  ab_group <- gsub("[^a-zA-Z|0-9]", ".*", ab_group)
  # first try: well-known drugs (have a DDD, single-word name, no
  # trailing digit in the ab code)
  drugs <- antibiotics[which((!is.na(antibiotics$iv_ddd) | !is.na(antibiotics$oral_ddd)) &
                               antibiotics$name %unlike% " " &
                               antibiotics$group %like% ab_group &
                               antibiotics$ab %unlike% "[0-9]$"), ]$name
  if (length(drugs) < n) {
    # fallback: widen the match to the ATC group columns
    drugs <- antibiotics[which((antibiotics$group %like% ab_group |
                                  antibiotics$atc_group1 %like% ab_group |
                                  antibiotics$atc_group2 %like% ab_group) &
                                 antibiotics$ab %unlike% "[0-9]$"), ]$name
  }
  if (length(drugs) == 0) {
    return("??")
  }
  # random sample keeps the examples varied between calls
  vector_or(ab_name(sample(drugs, size = min(n, length(drugs)), replace = FALSE),
                    tolower = TRUE,
                    language = NULL),
            quotes = FALSE)
}
# Inform the user which data set columns were matched by an antibiotic
# selector function. Messages are throttled via message_not_thrown_before()
# so the same selection is only reported once per session.
message_agent_names <- function(function_name, agents, ab_group = NULL, examples = "", ab_class_args = NULL, call = NULL) {
  if (message_not_thrown_before(function_name, sort(agents))) {
    if (length(agents) == 0) {
      # nothing matched: tailor the message to the selector used
      if (is.null(ab_group)) {
        message_("For `", function_name, "()` no antimicrobial agents found", examples, ".")
      } else if (ab_group == "administrable_per_os") {
        message_("No orally administrable agents found", examples, ".")
      } else if (ab_group == "administrable_iv") {
        message_("No IV administrable agents found", examples, ".")
      } else {
        message_("No antimicrobial agents of class '", ab_group, "' found", examples, ".")
      }
    } else {
      agents_formatted <- paste0("'", font_bold(agents, collapse = NULL), "'")
      agents_names <- ab_name(names(agents), tolower = TRUE, language = NULL)
      # append the full drug name only where it differs from the column name
      need_name <- generalise_antibiotic_name(agents) != generalise_antibiotic_name(agents_names)
      agents_formatted[need_name] <- paste0(agents_formatted[need_name], " (", agents_names[need_name], ")")
      message_("For `", function_name, "(",
               ifelse(function_name == "ab_class",
                      paste0("\"", ab_class_args, "\""),
                      ifelse(!is.null(call),
                             paste0(deparse(call), collapse = " "),
                             "")),
               ")` using ",
               ifelse(length(agents) == 1, "column ", "columns "),
               vector_and(agents_formatted, quotes = FALSE, sort = FALSE))
    }
  }
}
# Draw a solid base under a triangulated surface with rgl: builds the
# four vertical side walls from the surface's boundary vertices, then a
# fan-triangulated bottom at basedepth. tris is assumed to be a matrix
# of vertex rows (x in col 1, height in col 2, z in col 3) -- TODO
# confirm against caller.
make_base_triangulated = function(tris, basedepth=0,basecolor="grey20") {
  bd = basedepth
  edge_row_max = max(tris[,1])
  edge_row_min = min(tris[,1])
  edge_col_max = max(tris[,3])
  edge_col_min = min(tris[,3])
  # vertices lying on any of the four boundary edges
  just_edge_verts = unique(tris[which(tris[,1] == edge_row_max |
                         tris[,1] == edge_row_min |
                         tris[,3] == edge_col_max |
                         tris[,3] == edge_col_min),])
  edge_verts = list()
  counter = 1
  # wall along the minimum-row edge: two triangles per boundary segment
  # (each matrix below is one triangle, vertices in columns)
  side_r_min = just_edge_verts[just_edge_verts[,1] == edge_row_min,]
  side_r_min = side_r_min[order(side_r_min[,3]),]
  for(i in 1:(nrow(side_r_min)-1)) {
    nr = edge_row_min
    edge_verts[[counter]] = matrix(c(nr, nr, nr,bd,side_r_min[i,2],bd,side_r_min[i+1,3],side_r_min[i,3],side_r_min[i,3]),3,3)
    counter = counter + 1
    edge_verts[[counter]] = matrix(c(nr, nr, nr,bd,side_r_min[i+1,2],side_r_min[i,2],side_r_min[i+1,3],side_r_min[i+1,3],side_r_min[i,3]),3,3)
    counter = counter + 1
  }
  # wall along the maximum-row edge (traversed in reverse for winding)
  side_r_max = just_edge_verts[just_edge_verts[,1] == edge_row_max,]
  side_r_max = side_r_max[rev(order(side_r_max[,3])),]
  for(i in 1:(nrow(side_r_max)-1)) {
    nr = edge_row_max
    edge_verts[[counter]] = matrix(c(nr, nr, nr,bd,side_r_max[i,2],bd,side_r_max[i+1,3],side_r_max[i,3],side_r_max[i,3]),3,3)
    counter = counter + 1
    edge_verts[[counter]] = matrix(c(nr, nr, nr,bd,side_r_max[i+1,2],side_r_max[i,2],side_r_max[i+1,3],side_r_max[i+1,3],side_r_max[i,3]),3,3)
    counter = counter + 1
  }
  # wall along the minimum-column edge
  side_c_min = just_edge_verts[just_edge_verts[,3] == edge_col_min,]
  side_c_min = side_c_min[rev(order(side_c_min[,1])),]
  for(i in 1:(nrow(side_c_min)-1)) {
    nc = edge_col_min
    edge_verts[[counter]] = matrix(c(side_c_min[i+1,1],side_c_min[i,1],side_c_min[i,1],bd,side_c_min[i,2],bd, nc,nc,nc),3,3)
    counter = counter + 1
    edge_verts[[counter]] = matrix(c(side_c_min[i+1,1],side_c_min[i+1,1],side_c_min[i,1],bd,side_c_min[i+1,2],side_c_min[i,2],nc,nc,nc),3,3)
    counter = counter + 1
  }
  # wall along the maximum-column edge
  side_c_max = just_edge_verts[just_edge_verts[,3] == edge_col_max,]
  side_c_max = side_c_max[order(side_c_max[,1]),]
  for(i in 1:(nrow(side_c_max)-1)) {
    nc = edge_col_max
    edge_verts[[counter]] = matrix(c(side_c_max[i+1,1],side_c_max[i,1],side_c_max[i,1],bd,side_c_max[i,2],bd, nc,nc,nc),3,3)
    counter = counter + 1
    edge_verts[[counter]] = matrix(c(side_c_max[i+1,1],side_c_max[i+1,1],side_c_max[i,1],bd,side_c_max[i+1,2],side_c_max[i,2],nc,nc,nc),3,3)
    counter = counter + 1
  }
  # draw all four side walls in one rgl call
  fullsides = do.call(rbind,edge_verts)
  rgl::triangles3d(fullsides,
                   lit=FALSE,color=basecolor,front="filled",back="culled",tag = "base")
  # collect the bottom-ring vertices (those at basedepth) and order them
  # counterclockwise around the perimeter for a triangle fan
  base_entries = unique(fullsides[fullsides[,2] == basedepth,])
  base_entries_up = base_entries[base_entries[,1] == edge_row_max | base_entries[,1] == edge_row_min, ]
  base_entries_up = base_entries_up[order(base_entries_up[,1],base_entries_up[,3]),]
  base_entries_up1 = base_entries_up[base_entries_up[,1] == edge_row_min,]
  base_entries_up1 = base_entries_up1[nrow(base_entries_up1):1,]
  base_entries_up2 = base_entries_up[base_entries_up[,1] == edge_row_max,]
  base_entries_side = base_entries[base_entries[,3] == edge_col_max | base_entries[,3] == edge_col_min, ]
  base_entries_side = base_entries_side[order(base_entries_side[,3],base_entries_side[,1]),]
  base_entries_side1 = base_entries_side[base_entries_side[,3] == edge_col_min,]
  base_entries_side2 = base_entries_side[base_entries_side[,3] == edge_col_max,]
  base_entries_side2 = base_entries_side2[nrow(base_entries_side2):1,]
  edges = unique(do.call(rbind,list(base_entries_up1,base_entries_side1,base_entries_up2, base_entries_side2)))
  # close the loop so the last fan triangle reaches the first vertex
  edges = rbind(edges,edges[1,])
  full_bottom_final = list()
  counter = 1
  # fan center; note x/z are 0 regardless of the surface's extent
  center = c(0,basedepth,0)
  for(i in 1:(nrow(edges)-1)) {
    full_bottom_final[[counter]] = matrix(c(edges[i,],edges[i+1,],center), 3,3,byrow=TRUE)
    counter = counter + 1
  }
  bottom_tris = do.call(rbind,full_bottom_final)
  rgl::triangles3d(bottom_tris, lit=FALSE,color=basecolor,front="filled",back="culled",tag = "base")
}
# Test suite (testthat) for the ecld package: incomplete gamma helpers
# and the small-sigma limit of the option generating function (OGF).
context("Test Incomplete Gamma and Small Sigma Limit")
# tolerances: eps for series approximations, eps5 for near-exact identities
eps <- 0.001
eps5 <- 0.00001
sigma <- 0.001
lambdas <- c(1, 2, 2.5, 3)
test_that("test Gamma(s,x) function, x=0",{
    # Gamma(s, 0) must reduce to the complete gamma function
    s <- seq(0.5, 5, by=0.5)
    g1 <- ecld.gamma(s, 0)
    g2 <- gamma(s)
    expect_true(max(abs(g1-g2)) < eps5)
})
test_that("test Gamma(101, 1)",{
    # checked against an externally computed reference value (MPFR)
    g1 <- ecld.gamma(101, ecd.mp1)
    g2 <- ecd.mpfr(9.3326215443944)*ecd.mpfr(10)^157
    expect_true(max(abs(g1/g2-1)) < eps5)
})
test_that("test Gamma(200, 2)",{
    g1 <- ecld.gamma(200, 2*ecd.mp1)
    g2 <- ecd.mpfr(3.94328933682395)*ecd.mpfr(10)^372
    expect_true(max(abs(g1/g2-1)) < eps5)
})
test_that("test lgamma in ecld.mgf_term",{
    # lgamma-based MGF term must agree with the original implementation
    ld <- ecld(3, 0.05)
    n <- c(10, 50, 100)
    g1 <- ecld.mgf_term(ld, n)
    g2 <- ecld.mgf_term_original(ld, n)
    expect_true(max(abs(g1/g2-1)) < eps5)
})
# the following identities are checked for each lambda in lambdas
for (lambda in lambdas) {
    test_that(paste("test Gamma(s,x) hypergeo expansion, s=", lambda),{
        x <- 10
        order <- 10
        g1 <- ecld.gamma(lambda, x)
        g2 <- ecld.gamma_hgeo(lambda, x, order)
        expect_true(abs(g2/g1-1) < eps)
    })
    # small-sigma distribution with mu fixed at the risk-neutral drift
    ld0 <- ecld(lambda=lambda, sigma=0.001*ecd.mp1)
    mu_D <- ecld.mu_D(ld0)
    ld <- ecld(lambda=lambda, sigma=ld0@sigma, mu=mu_D)
    ki <- c(2,4)*lambda
    k <- ki*ld@sigma + ld@mu
    test_that(paste("test star OGF vs full OGF, lambda=", lambda),{
        g1 <- ecld.ogf(ld, k, otype="c")
        g2 <- ecld.ogf_star(ld, ki) *ld@sigma *exp(ld@mu)
        err = max(abs(g2/g1-1))
        expect_true(err < 0.02)
    })
    test_that(paste("test star OGF btw gamma and hgeo, lambda=", lambda),{
        g1 <- ecld.ogf_star(ld, ki)
        g2 <- ecld.ogf_star_hgeo(ld, ki, order=4)
        err = max(abs(g2/g1-1))
        # the hypergeometric expansion converges poorly at lambda == 1
        expect_true(err < ifelse(lambda==1, 0.15, 0.01))
    })
    test_that(paste("test identity of star OGF btw hgeo and exp, lambda=", lambda),{
        g1 <- ecld.ogf_star_hgeo(ld, ki, order=4)
        g2 <- ecld.ogf_star_exp(ld, ki, order=3)
        err = max(abs(g2/g1-1))
        expect_true(err < eps)
    })
    test_that(paste("test star OGF btw gamma and gamma_star, lambda=", lambda),{
        ki <- c(0, 0.25, 0.5, 0.75, 1)
        g1 <- ecld.ogf_star(ld, ki)
        g2 <- ecld.ogf_star_gamma_star(ld, ki)
        err = max(abs(g2/g1-1))
        expect_true(err < eps)
    })
}
# S3 plot method for YPmodel martingale-residual test objects: draws the
# randomly generated realizations of the test process (dotted red) and
# overlays the observed process (solid blue). Time axis is rescaled from
# years to days (X * 365).
plot.YPmodel.martint <-
function(x=c(), Internal=c(), ...)
{
	Data <- x$Data
	rTestData <- x$rTestData
	Estimate <- x$Estimate
	LackFitTest <- x
	# recompute internal parameters unless the caller supplied them
	if(is.null(Internal)){
		Internal <- fun.internalParameters(Data=Data, Estimate=Estimate)
	}
	X <- Data$X
	kk <- Internal$kk
	wtildCount1 <- LackFitTest$wtildCount1
	lineCount1 <- LackFitTest$lineCount1
	obs <- LackFitTest$obs
	# first realization sets up the axes; y-range covers all curves
	plot(X[1:kk]*365,wtildCount1[,1],"l",lty = "dotted",col="red",xlab="Days", ylab=" ",xlim=c(1, max(X*365)),ylim=c(min(obs,wtildCount1), max(obs,wtildCount1)))
	for(i in 2:lineCount1){
		lines(X[1:kk]*365,wtildCount1[,i],"l",lty = "dotted",col="red")
	}
	# observed martingale-residual process on top
	lines(X[1:kk]*365,obs[1:kk],"l",col="blue",lwd=2)
	title(main="Plots of the martingale residual-based test statistic \r\n and randomly selected realizations of the process")
}
# Vector method for quiver2D (plot3D package): draws a 2-D velocity
# field (u, v) at positions (x, y) as arrows, optionally colored by
# colvar with a color key. Returns (invisibly) the arrow segment
# endpoints and scaling info so the plot can be reproduced or added to.
quiver2D.vector <- function(u, v, x = NULL, y = NULL, colvar = NULL, ...,
                     scale = 1, arr.max = 0.2, arr.min = 0, speed.max = NULL,
                     by = NULL, type = "triangle",
                     col = NULL, NAcol = "white", breaks = NULL, colkey = NULL,
                     clim = NULL, clab = NULL, add = FALSE, plot = TRUE) {
  # NOTE(review): message typo "anc" -> "and" (left untouched here)
  if (! is.vector(u) | ! is.vector (v))
    stop ("'u' anc 'v' should be a vector")
  # split ... into point-level and main-plot parameters
  dots <- splitpardots( list(...) )
  dp <- dots$points
  dm <- dots$main
  if (add)
    plist <- getplist()
  else plist <- NULL
  setplist(plist)
  if (! is.null(colvar)) {
    varlim <- clim
    if (is.null(varlim))
      varlim <- range(colvar, na.rm = TRUE)
    if (any (dim(colvar) - c(nrow(u), ncol(v)) != 0))
      stop ("dimension of 'colvar' not compatible with dimension of 'u' and 'v'")
    # thin the color variable consistently with the arrow thinning
    if (! is.null(by)) {
      ix <- seq(1, length(u), by = by)
      colvar <- colvar[ix]
    }
    if (is.null(col) & is.null(breaks))
      col <- jet.col(100)
    else if (is.null(col))
      col <- jet.col(length(breaks)-1)
    breaks <- check.breaks(breaks, col)
    # logarithmic color scale requested via the 'clog' dot argument
    if (dots$clog) {
      colvar <- log(colvar)
      if (! is.null(clim))
        clim <- log(clim)
    }
    iscolkey <- is.colkey(colkey, col)
    if (iscolkey) {
      colkey <- check.colkey(colkey)
      if (! add)
        plist$plt$main <- colkey$parplt
    }
    par (plt = plist$plt$main)
    if (is.null(clim))
      clim <- range(colvar, na.rm = TRUE)
    # map colvar values to actual arrow colors
    Col <- variablecol(colvar, col, NAcol, clim, breaks)
    pltori <- plist$plt$ori
  } else {
    Col <- col
    if (is.null(Col))
      Col <- "black"
    iscolkey <- FALSE
  }
  # 'a' in the log spec means log arrow lengths, not log axes
  Log <- FALSE
  if (! is.null(dm$log)) {
    if (length(grep("a", dm[["log"]])) > 0) {
      dm[["log"]] <- gsub("a", "", dm[["log"]])
      Log <- TRUE
      if (dm[["log"]] == "")
        dm[["log"]] <- NULL
    }
  }
  # cap speeds at speed.max (if given) so one outlier cannot shrink
  # all other arrows
  maxspeed <- speed.max
  speed <- sqrt(u^2 + v^2)
  if (is.null(maxspeed))
    maxspeed <- max(speed)
  else {
    if (maxspeed <= 0)
      stop ("'speed.max' should be >= 0")
    speed <- pmin(speed, maxspeed)
  }
  if (maxspeed == 0)
    maxspeed <- 1e-16
  # when adding to an existing quiver plot, reuse its scaling state
  if (!is.null(plist$quiver)) {
    xr <- plist$quiver$xr
    yr <- plist$quiver$yr
    maxspeed <- plist$quiver$maxspeed
    if (Log) maxspeed <- exp(maxspeed)
  } else {
    xr <- diff (range(x)) /diff (range(u))
    yr <- diff (range(y)) / diff (range(v))
  }
  if (!is.null(scale)) {
    u <- u * scale / maxspeed * xr
    v <- v * scale / maxspeed * yr
  }
  xto <- x + u
  yto <- y + v
  # arrowhead size scales with (possibly log-transformed) speed
  if (Log) speed <- log(speed)
  if (Log) maxspeed <- log(maxspeed)
  dp$length <- speed / maxspeed * (arr.max - arr.min) + arr.min
  if (plot) {
    if (! add) {
      if (is.null(dm$xlab))
        dm$xlab <- "x"
      if (is.null(dm$ylab))
        dm$ylab <- "y"
    }
    dp$arr.type <- NULL
    if (is.null(dp$lwd))
      dp$lwd <- 1
    do.call("arrows2D", c(alist(x, y, xto, yto, col = Col, type = type, add = add), dm, dp))
    if (iscolkey) {
      colkey$parleg <- colkey$parplt <- NULL
      do.call("colkey", c(alist(col = col, clim = varlim, clab = clab,
        clog = dots$clog, add = TRUE), colkey))
      par(plt = pltori)
    }
    par(mar = par("mar"))
  }
  if (Log) maxspeed <- exp(maxspeed)
  # store the scaling so later add = TRUE calls use consistent arrows
  if (! add) {
    plist <- getplist()
    plist$quiver <- list(xr = xr, yr = yr, maxspeed = maxspeed)
    setplist(plist)
  }
  invisible(list(x0 = x, y0 = y, x1 = xto, y1 = yto, col = Col, length = dp$length,
    speed.max = maxspeed))
}
# Extract the coefficient table from a fitted model's summary and return
# it as a data frame whose first column ("Col") holds the term names.
getCoefficients <- function(model) {
  coefs <- summary(model)$coefficients
  data.frame(Col = row.names(coefs), coefs)
}
# ESEUR analysis script: contrasts y-on-x and x-on-y regression lines
# for Linux kernel developers vs commits, drawing each fit with its
# residual segments (vertical for y~x, horizontal for x~y).
source("ESEUR_config.r")
library("deming")
library("lmodel2")
plot_layout(1, 2)
ks=read.csv(paste0(ESEUR_dir, "regression/kernel_stats.csv.xz"), as.is=TRUE)
ks_na=na.omit(ks)
x = ks_na$number.developers
y = ks_na$commits
# alternative errors-in-variables fits (computed but not plotted below)
d_mod=deming(x ~ y)
l_mod=lmodel2(x ~ y)
yx_line = glm(y ~ x)
xy_line = glm(x ~ y)
# left panel: y~x fit with vertical residuals
plot(y ~ x,
	xlab="Developers", ylab="Commits\n")
abline(reg=yx_line, col="red")
segments(x, y, x, fitted(yx_line), lty = 2, col = "red")
par(new=FALSE)
# right panel: same scatter, adding the inverted x~y line (green)
plot(y ~ x,
	xlab="Developers", ylab="Commits\n")
abline(reg=yx_line, col="red")
xy_line.coef=xy_line$coefficients
# invert x = a + b*y into y = -a/b + x/b for plotting in (x, y) space
abline(coef=c(-xy_line.coef[1]/xy_line.coef[2], 1/xy_line.coef[2]), col="green")
segments(x, y, fitted(xy_line), y, lty = 2, col = "green")
# Split a vector of duplicate pair measurements into its two members and
# run a one-way ANOVA on them via anova1().
#
# x      vector of 2*ndup values holding duplicate pairs
# xname  label used in anova1 output (defaults to the caller's expression)
# log    passed through to anova1
# ifalt  TRUE  -> duplicates alternate: x1, x2, x1, x2, ...
#        FALSE -> first half of x is x1, second half is x2
#
# Improvements over the original: the element-by-element copy loops are
# replaced with vectorized stride/half extraction, and integer division
# (%/%) keeps ndup well-defined when length(x) is odd (a trailing
# unpaired value is ignored rather than producing fractional indexing).
anova2 <-
function (x, xname = deparse(substitute(x)), log = FALSE, ifalt = FALSE) 
{
    n <- length(x)
    ndup <- n %/% 2
    if (ifalt) {
        # interleaved pairs: odd positions vs even positions
        x1 <- x[seq(1, by = 2, length.out = ndup)]
        x2 <- x[seq(2, by = 2, length.out = ndup)]
    }
    else {
        # block layout: first half vs second half
        x1 <- x[seq_len(ndup)]
        x2 <- x[ndup + seq_len(ndup)]
    }
    anova1(x1, x2, xname = xname, log = log)
    invisible()
}
# Provenance-collection test script for rdtLite: defines a few globals,
# then records provenance for a sourced script into ./testdata.
a <- 1
b <- 5
cc <- 7
library(rdtLite)
prov.init(prov.dir = "testdata", snapshot.size="10")
prov.source("testscripts/source_fromEnv.R")
prov.quit()
# Test suite (testthat) for loo::loo_model_weights: input validation,
# stacking weights, and pseudo-BMA(+) weights on three simulated models.
library(loo)
context("loo_model_weights")
set.seed(123)
# simulate data and three candidate models differing in mean/sd
y<-rnorm(50,0,1)
sd_sim1<- abs(rnorm(500,1.5, 0.1))
sd_sim2<- abs(rnorm(500,1.2, 0.1))
sd_sim3<- abs(rnorm(500,1, 0.05))
log_lik1 <- log_lik2 <- log_lik3 <- matrix(NA, 500, 50)
for(s in 1:500) {
  log_lik1[s,] <- dnorm(y,-1,sd_sim1[s], log=T)
  log_lik2[s,] <- dnorm(y,0.7,sd_sim2[s], log=T)
  log_lik3[s,] <- dnorm(y,1,sd_sim3[s], log=T)
}
ll_list <- list(log_lik1, log_lik2,log_lik3)
r_eff_list <- list(rep(0.9,50), rep(0.9,50), rep(0.9,50))
loo_list <- lapply(1:length(ll_list), function(j) {
  loo(ll_list[[j]], r_eff = r_eff_list[[j]])
})
tol <- 0.01
test_that("loo_model_weights throws correct errors and warnings", {
  expect_error(loo_model_weights(log_lik1), "list of matrices or a list of 'psis_loo' objects")
  expect_error(loo_model_weights(list(log_lik1)), "At least two models")
  expect_error(loo_model_weights(list(loo_list[[1]])), "At least two models")
  expect_error(loo_model_weights(list(log_lik1), method = "pseudobma"), "At least two models")
  expect_error(loo_model_weights(list(log_lik1, log_lik2[-1, ])), "same dimensions")
  expect_error(loo_model_weights(list(log_lik1, log_lik2, log_lik3[, -1])), "same dimensions")
  # mismatched dims attribute must also be rejected
  loo_list2 <- loo_list
  attr(loo_list2[[3]], "dims") <- c(10, 10)
  expect_error(loo_model_weights(loo_list2), "same dimensions")
  expect_error(loo_model_weights(ll_list, r_eff_list = r_eff_list[-1]),
               "one component for each model")
  r_eff_list[[3]] <- rep(0.9, 51)
  expect_error(loo_model_weights(ll_list, r_eff_list = r_eff_list),
               "same length as the number of columns")
  expect_error(loo_model_weights(list(loo_list[[1]], 2)),
               "List elements must all be 'psis_loo' objects or log-likelihood matrices",
               fixed = TRUE)
  expect_warning(loo_model_weights(ll_list), "Relative effective sample sizes")
})
test_that("loo_model_weights (stacking and pseudo-BMA) gives expected result", {
  # stacking weights against stored reference values
  w1 <- loo_model_weights(ll_list, method = "stacking", r_eff_list = r_eff_list)
  expect_type(w1,"double")
  expect_s3_class(w1, "stacking_weights")
  expect_length(w1, 3)
  expect_named(w1, paste0("model" ,c(1:3)))
  expect_equal_to_reference(as.numeric(w1), "reference-results/model_weights_stacking.rds",
                            tolerance = tol, scale=1)
  expect_output(print(w1), "Method: stacking")
  # list-of-loo-objects input must give identical weights
  w1_b <- loo_model_weights(loo_list)
  expect_identical(w1, w1_b)
  # pseudo-BMA+ (with Bayesian bootstrap)
  w2 <- loo_model_weights(ll_list, r_eff_list=r_eff_list,
                          method = "pseudobma", BB = TRUE)
  expect_type(w2, "double")
  expect_s3_class(w2, "pseudobma_bb_weights")
  expect_length(w2, 3)
  expect_named(w2, paste0("model", c(1:3)))
  expect_equal_to_reference(as.numeric(w2), "reference-results/model_weights_pseudobma.rds",
                            tolerance = tol, scale=1)
  expect_output(print(w2), "Method: pseudo-BMA+")
  # plain pseudo-BMA (no bootstrap) against hard-coded values
  w3 <- loo_model_weights(ll_list, r_eff_list=r_eff_list,
                          method = "pseudobma", BB = FALSE)
  expect_type(w3,"double")
  expect_length(w3, 3)
  expect_named(w3, paste0("model" ,c(1:3)))
  expect_equal(as.numeric(w3), c(5.365279e-05, 9.999436e-01, 2.707028e-06),
               tolerance = tol, scale = 1)
  expect_output(print(w3), "Method: pseudo-BMA")
  w3_b <- loo_model_weights(loo_list, method = "pseudobma", BB = FALSE)
  expect_identical(w3, w3_b)
})
test_that("stacking_weights and pseudobma_weights throw correct errors", {
  xx <- cbind(rnorm(10))
  expect_error(stacking_weights(xx), "two models are required")
  expect_error(pseudobma_weights(xx), "two models are required")
})
# Tests for sjmisc::all_na on vectors and data frames; the require()
# guard skips the suite when suggested packages are unavailable.
if (require("testthat") && require("sjmisc")) {
  x <- c(NA, NA, NA)
  y <- c(1, NA, NA)
  test_that("all_na", {
    expect_true(all_na(x))
  })
  test_that("all_na", {
    expect_false(all_na(y))
  })
  test_that("all_na, data.frame", {
    # data frame input should return a data frame of per-column results
    expect_is(all_na(data.frame(x, y)), "data.frame")
  })
}
# Kendall's tau for the chosen copula family.
# "copula2": closed-form expression from its two parameters
# eta = (alpha, kappa); any other family: delegate to tau() on an
# Archimedean copula built from eta. Family matching is case-insensitive.
tau_copula <- function(eta, copula){
  family <- tolower(copula)
  if (family == "copula2") {
    output <- 1 - 2 * eta[1] * eta[2] / (1 + 2 * eta[2])
  } else {
    output <- tau(archmCopula(family, param = eta, dim = 2))
  }
  return(output)
}
# Bootstrap worker for mudfold: refits the MUDFOLD scale on the
# resampled rows X[z, ], imputing missing responses first, and returns a
# single row of scale- and item-level statistics (NA-filled when no
# scale was found) suitable for cbind-ing across bootstrap replicates.
STATSboot <-function(X,z,start,lambda1,lambda2,estimation,out){
  items <- colnames(X)
  dat <- X[z,]
  # primary imputation: single mice draw using the stored predictor
  # settings from the original fit (out$CALL$mincor)
  if (any(is.na(dat))){
    mice_dat <- data.frame(sapply(dat, as.factor))
    new.pred <- quickpred(mice_dat, method = 'kendall', mincor = out$CALL$mincor)
    mice_imp <- mice(mice_dat,1, method = 'myfunc', printFlag = FALSE, predictorMatrix = new.pred, maxit = 20)
    mdat <- complete(mice_imp,1)
    dat <- data.frame(sapply(mdat,function(y) as.numeric(levels(y))[y]))
  }
  # fallback: any values still missing are replaced by the column mode
  dat <- apply(dat, 2, function(x){
    if (any(is.na(x))){
      x[is.na(x)] <- getmode(x)
    }
    x
  })
  fit <- mudfold(dat, start.scale = start,lambda1 = lambda1,lambda2 = lambda2, estimation = estimation,nboot = NULL)
  # preallocate NA placeholders: 6 scale stats + 5 stats per item
  stats_list_scale <- lapply(vector("list", 6 ),function(x) x <- NA)
  stats_list_Hitems <- lapply(vector("list", length(items)),function(x) x <- NA)
  stats_list_ISOitems <- lapply(vector("list", length(items)),function(x) x <- NA)
  stats_list_MAXitems <- lapply(vector("list", length(items)),function(x) x <- NA)
  stats_list_EOitems <- lapply(vector("list", length(items)),function(x) x <- NA)
  stats_list_Oitems <- lapply(vector("list", length(items)),function(x) x <- NA)
  if (!is.null(fit$MUDFOLD_INFO$second_step$scale)){
    vecH <- rep(NA,length(items))
    vecsH <- rep(NA,length(items))
    vecISO <- rep(NA,length(items))
    vecMAX <- rep(NA,length(items))
    vecEO <- rep(NA,length(items))
    vecO <- rep(NA,length(items))
    # scale-level statistics
    stats_list_scale[[1]] <- paste(fit$MUDFOLD_INFO$second_step$scale,collapse = " ")
    stats_list_scale[[2]] <- fit$MUDFOLD_INFO$second_step$Hscale
    stats_list_scale[[3]] <- fit$MUDFOLD_INFO$second_step$ISOscale
    stats_list_scale[[4]] <- fit$MUDFOLD_INFO$second_step$MAXscale
    stats_list_scale[[5]] <- fit$MUDFOLD_INFO$second_step$EXPscale
    stats_list_scale[[6]] <- fit$MUDFOLD_INFO$second_step$OBSscale
    # item-level statistics mapped back to the original item positions,
    # so unscaled items stay NA
    mdf_items <- fit$MUDFOLD_INFO$second_step$scale
    ids <- match(mdf_items,items)
    vecH[ids] <- fit$MUDFOLD_INFO$second_step$Hitem
    vecISO[ids] <- fit$MUDFOLD_INFO$second_step$ISOitem
    vecMAX[ids] <- fit$MUDFOLD_INFO$second_step$MAXitem
    vecEO[ids] <- fit$MUDFOLD_INFO$second_step$EXPitem
    vecO[ids] <- fit$MUDFOLD_INFO$second_step$OBSitem
    for (i in ids){
      stats_list_Hitems[[i]] <- vecH[i]
      stats_list_ISOitems[[i]] <- vecISO[i]
      stats_list_MAXitems[[i]] <- vecMAX[i]
      stats_list_EOitems[[i]] <- vecEO[i]
      stats_list_Oitems[[i]] <- vecO[i]
    }
  }
  # flatten everything into one row
  stats <- do.call(cbind,
                   c(stats_list_scale,
                     stats_list_Hitems,
                     stats_list_ISOitems,
                     stats_list_MAXitems,
                     stats_list_EOitems,
                     stats_list_Oitems))
  return(stats)
}
# NSE wrapper around pairwise_count_: captures the bare column names
# given for item, feature and wt and forwards them as strings.
pairwise_count <- function(tbl, item, feature, wt = NULL, ...) {
  pairwise_count_(tbl,
                  col_name(substitute(item)),
                  col_name(substitute(feature)),
                  wt = col_name(substitute(wt)),
                  ...)
}
# Standard-evaluation version of pairwise_count: counts, for each pair
# of items, the features they co-occur in, via a sparse matrix
# cross-product. When wt is NULL each item/feature occurrence counts as
# 1 (a "..value" column of ones is added); otherwise the named wt
# column weights the counts.
pairwise_count_ <- function(tbl, item, feature, wt = NULL, ...) {
  if (is.null(wt)) {
    # unweighted: m %*% t(m) on a 0/1 occurrence matrix
    func <- squarely_(function(m) m %*% t(m), sparse = TRUE, ...)
    wt <- "..value"
  } else {
    # weighted: sum wt over features shared by each item pair
    func <- squarely_(function(m) m %*% t(m > 0), sparse = TRUE, ...)
  }
  tbl %>%
    distinct_(.dots = c(item, feature), .keep_all = TRUE) %>%
    mutate(..value = 1) %>%
    func(item, feature, wt) %>%
    rename(n = value)
}
# Probability generating function of the logarithmic distribution:
#   G(s) = log(1 - (1 - theta) * s) / log(theta),  theta in (0, 1).
# s may be a vector; values of |s| > 1 trigger a warning (the PGF is
# only guaranteed on [-1, 1]).
#
# Improvements: the materialized subset k and its length check are
# replaced by any() (na.rm = TRUE keeps NA inputs from erroring the
# check), and the scalar theta test uses short-circuit ||.
pgflogarithmic <-
function(s,params) {
    if (any(abs(s) > 1, na.rm = TRUE))
        warning("At least one element of the vector s are out of interval [-1,1]")
    if (length(params) > 1) stop("The length of params is 1")
    theta <- params[1]
    if ((theta >= 1) || (theta <= 0))
        stop ("Parameter theta belongs to the interval (0,1)")
    log(1 - (1 - theta) * s) / log(theta)
}
# dynaTree demo on the elec2 data: online classification comparing a
# full-memory model (f) against windowed variants that retire old points
# (h), retire with discounting (hd), retire+rejuvenate (hr), and
# retire+discount+rejuvenate (hdr). Tracks one-step-ahead hit rates.
library(dynaTree)
data(elec2)
X <- elec2[,1:4]
y <- drop(elec2[,5])
T <- nrow(X)
hits <- rep(NA, T)
hits <- data.frame(f=hits, h=hits, hd=hits, hr=hits, hdr=hits)
n <- 25
N <- 1000
# fit once on the first n points, then copy for each variant
ffit <- dynaTree(X[1:n,], y[1:n], N=N, model="class")
hfit <- copy(ffit)
hdfit <- copy(ffit)
hrfit <- copy(ffit)
hdrfit <- copy(ffit)
w <- 1
for(t in (n+1):T) {
  # predict-then-update: record whether each model's MAP class is right
  ffit <- predict(ffit, XX=X[t,], yy=y[t])
  hits$f[t] <- which.max(ffit$p) == y[t]
  hfit <- predict(hfit, XX=X[t,], yy=y[t])
  hits$h[t] <- which.max(hfit$p) == y[t]
  hdfit <- predict(hdfit, XX=X[t,], yy=y[t])
  hits$hd[t] <- which.max(hdfit$p) == y[t]
  hrfit <- predict(hrfit, XX=X[t,], yy=y[t])
  hits$hr[t] <- which.max(hrfit$p) == y[t]
  hdrfit <- predict(hdrfit, XX=X[t,], yy=y[t])
  hits$hdr[t] <- which.max(hdrfit$p) == y[t]
  # sanity check: slot w should still hold the oldest windowed point
  if(any(hfit$X[w,] != X[t-n,])) stop("bad retiring in h")
  if(any(hdfit$X[w,] != X[t-n,])) stop("bad retiring in hd")
  if(any(hrfit$X[w,] != X[t-n,])) stop("bad retiring in hr")
  if(any(hdrfit$X[w,] != X[t-n,])) stop("bad retiring in hdr")
  # retire the oldest point (lambda < 1 discounts its influence)
  hfit <- retire(hfit, w)
  hdfit <- retire(hdfit, w, lambda=0.9)
  hrfit <- retire(hrfit, w)
  hdrfit <- retire(hdrfit, w, lambda=0.9)
  w <- w + 1; if(w >= n) w <- 1
  # periodically rejuvenate the particle cloud for the *r variants
  if(t %% n == 0) {
    hrfit <- rejuvenate(hrfit, odr=1:(n-1), verb=0)
    hdrfit <- rejuvenate(hdrfit, odr=1:(n-1), verb=0)
  }
  ffit <- update(ffit, X[t,], y[t], verb=100)
  hfit <- update(hfit, X[t,], y[t], verb=100)
  hdfit <- update(hdfit, X[t,], y[t], verb=100)
  hrfit <- update(hrfit, X[t,], y[t], verb=100)
  hdrfit <- update(hdrfit, X[t,], y[t], verb=100)
}
deleteclouds()
apply(hits, 2, mean, na.rm=TRUE)
n <- 25
# exponentially smoothed hit rates for plotting
rhits <- matrix(0, nrow=nrow(hits), ncol=ncol(hits))
for(i in (n+1):nrow(hits)) {
  rhits[i,] <- 0.05*as.numeric(hits[i,]) + 0.95*rhits[i-1,]
}
matplot(rhits, type="l")
# Example script: delineate hydrological events in the djan series,
# then compute the AHI index for event number 2.
library(dplyr)
data(djan)
output_table <- hydro_events(dataframe = djan,
                             q = discharge,
                             datetime = time,
                             window = 21)
output_table %>%
  filter(he == 2) %>%
  AHI(q = discharge, ssc = SS)
# Linear dose-response model: intercept d, slope b.
flin <- function(x, b, d)
{
  b * x + d
}
# Inverse of flin: the dose at which the linear model reaches signal y.
invlin <- function(y, b, d)
{
  (y - d) / b
}
# nls formula: 3-parameter exponential dose-response model
formExp3p <- as.formula(signal ~ d + b * (exp(dose/e) - 1) )
# First starting-value heuristic for fitting formExp3p with nls().
# xm/ym are the (mean) doses and signals; increase/Ushape describe the
# observed trend. |e| starts at 10% of the dose range.
startvalExp3pnls.1 <- function(xm, ym, increase, Ushape)
{
  d <- ym[1]
  eabs <- 0.1*max(xm)
  if ((increase & !Ushape) | (!increase & Ushape))
  {
    # saturating direction: start from the span between the control
    # response and the response at the highest dose, with e < 0
    c <- ym[which.max(xm)]
    b <- d - c
    e <- -eabs
  } else
  {
    # exploding direction: linearize with a fixed e and regress
    e <- eabs
    reg <- lm(ym ~ exp(xm / e))
    b <- coef(reg)[2]
  }
  startval <- list(b = b, d = d, e = e)
}
# Second starting-value heuristic for formExp3p: identical to
# startvalExp3pnls.1 except |e| starts at the full dose range instead
# of 10% of it (used as a fallback when the first guess fails).
startvalExp3pnls.2 <- function(xm, ym, increase, Ushape)
{
  d <- ym[1]
  eabs <- max(xm)
  if ((increase & !Ushape) | (!increase & Ushape))
  {
    c <- ym[which.max(xm)]
    b <- d - c
    e <- -eabs
  } else
  {
    e <- eabs
    reg <- lm(ym ~ exp(xm / e))
    b <- coef(reg)[2]
  }
  startval <- list(b = b, d = d, e = e)
}
# 3-parameter exponential model value at dose x: d + b*(exp(x/e) - 1).
fExpo <- function(x, b, d, e)
{
  b * (exp(x/e) - 1) + d
}
# Inverse of fExpo. For e < 0 the model is bounded by the asymptote
# d - b, so signals beyond it have no dose: return NaN.
invExpo <- function(y, b, d, e)
{
  beyond_asymptote <- ((e < 0) & (b < 0) & (y > d - b)) |
                      ((e < 0) & (b > 0) & (y < d - b))
  if (beyond_asymptote)
    return(NaN)
  e * log(1 + (y - d) / b)
}
# nls formula: 4-parameter Hill (log-logistic) dose-response model
formHill <- as.formula(signal ~ c + (d - c) / (1 + (dose/e)^b ) )
# Starting values for fitting formHill with nls(): asymptotes c and d
# from the (slightly widened) observed signal range, then b and e from
# a linear regression on the logit-log transformed model.
startvalHillnls2 <- function(x, y, xm, ym, increase)
{
  maxi <- max(y, na.rm = TRUE)
  mini <- min(y, na.rm = TRUE)
  ampl <- maxi - mini
  # widen the range slightly so no observation sits exactly on an
  # asymptote (which would make the log transform infinite)
  maxi <- maxi + 0.001 * ampl
  mini <- mini - 0.001 * ampl
  c <- ifelse(increase, maxi, mini)
  d <-ifelse(increase, mini, maxi)
  # linearize: log((d-c)/(y-c) - 1) = b*log(x) - b*log(e); zero doses
  # are excluded because of log(x)
  yreg <- log((d - c) / (y[x!=0] - c) - 1)
  xreg <- log(x[x!=0])
  reg <- lm(yreg ~ xreg)
  b <- reg$coefficients[2]
  e <- reg$coefficients[1] / (-b)
  startval <- list(b = b, c = c, d = d, e = e)
}
# Hill model value at dose x: asymptotes c (high dose) and d (control),
# inflection at dose e, slope parameter b.
fHill <- function(x, b, c, d, e)
{
  (d - c) / (1 + (x/e)^b) + c
}
# Inverse of fHill; signals beyond the asymptote c are unreachable and
# yield NaN.
invHill <- function(y, b, c, d, e)
{
  if (((d < c) & (y > c)) | ((d > c) & (y < c)))
    return(NaN)
  e * ((d - y) / (y - c))^(1/b)
}
# nls formula: 5-parameter Gauss-probit model (probit trend d -> c plus
# a Gaussian bump of height f centred at e)
formGauss5p <- as.formula(signal ~ f * exp(-0.5 * ((dose-e)/b)^2) + d + (c - d) * pnorm((dose-e)/b))
# Starting values for fitting formGauss5p with nls(): asymptotes from
# the control and highest-dose responses, bump height from the observed
# extremum, width from a quarter of the dose range.
startvalGauss5pnls <- function(xm, ym, Ushape)
{
  d <- ym[1]
  c <- ym[which.max(xm)]
  yextremum <- ifelse(Ushape, min(ym), max(ym))
  f <- yextremum - (c + d) / 2
  b <- max(xm) / 4
  # dose at the extremum (median breaks ties)
  xextremum <- median(xm[which(ym == yextremum)])
  # shift by the analytic offset between the extremum and e
  # NOTE(review): min(..., 1e-6) caps e at 1e-6 -- looks asymmetric with
  # startvalGauss4pnls; confirm intended (vs pmax/pmin) upstream
  e <- min(xextremum - (c - d)*b/(f*sqrt(2*pi)), 1e-6)
  startval <- list(b = b, c = c, d = d, e = e, f = f)
}
# nls formula: 4-parameter Gaussian model (no probit trend, c == d)
formGauss4p <- as.formula(signal ~ f * exp(-0.5 * ((dose-e)/b)^2) + d )
# Starting values for fitting formGauss4p with nls(): baseline from the
# highest-dose response, bump height from the observed extremum,
# width from a quarter of the dose range, centre capped at 1e-6.
startvalGauss4pnls <- function(xm, ym, Ushape)
{
  d <- ym[which.max(xm)]
  yextremum <- ifelse(Ushape, min(ym), max(ym))
  f <- yextremum - d
  b <- max(xm) / 4
  # dose at the extremum (median breaks ties)
  xextremum <- median(xm[which(ym == yextremum)] )
  # NOTE(review): min(..., 1e-6) caps e at 1e-6 -- confirm intended
  e <- min(xextremum, 1e-6)
  startval <- list(b = b, d = d, e = e, f = f)
}
# nls formula: probit dose-response model (monotone d -> c transition)
formprobit <- as.formula(signal ~ d + (c - d) * pnorm((dose-e)/b))
# Gauss-probit 5-parameter model value at dose x: probit trend from d
# to c plus a Gaussian bump of height f, both centred at e with scale b.
fGauss5p <- function(x, b, c, d, e, f)
{
  z <- (x - e) / b
  f * exp(-0.5 * z^2) + d + (c - d) * pnorm(z)
}
# fGauss5p (with bump height g) minus a benchmark threshold; the root
# of this function in x is the benchmark dose (BMD).
fGauss5pBMR <- function(x, b, c, d, e, g, threshold)
{
  z <- (x - e) / b
  g * exp(-0.5 * z^2) + d + (c - d) * pnorm(z) - threshold
}
# Same as fGauss5pBMR but parameterized in log(dose), for root finding
# on the log scale.
fGauss5pBMR_xinlog <- function(xinlog, b, c, d, e, g, threshold)
{
  z <- (exp(xinlog) - e) / b
  g * exp(-0.5 * z^2) + d + (c - d) * pnorm(z) - threshold
}
# Probit model value at dose x: moves from d to c as x crosses e,
# with scale parameter b.
fprobit <- function(x, b, c, d, e)
{
  (c - d) * pnorm((x - e)/b) + d
}
# Inverse of fprobit; signals beyond the asymptote c yield NaN.
invprobit <- function(y, b, c, d, e)
{
  if (((d < c) & (y > c)) | ((d > c) & (y < c)))
    return(NaN)
  e + b * qnorm((y - d) / (c - d))
}
# Check whether a fitted Gauss-probit 5p model has its analytic extremum
# outside the admissible signal range [signalmin, signalmax].
fGauss5poutofrange <- function(fit, signalmin, signalmax)
{
  par <- coef(fit)
  b.i <- par["b"]
  c.i <- par["c"]
  d.i <- par["d"]
  e.i <- par["e"]
  f.i <- par["f"]
  # dose of the model extremum, from the closed-form derivative root
  xextr.i <- e.i + (c.i - d.i)*b.i / (f.i*sqrt(2*pi))
  yextr.i <- fGauss5p(xextr.i, b = b.i, c = c.i, d = d.i, e = e.i, f = f.i)
  outofrange <- (yextr.i > signalmax) | (yextr.i < signalmin)
}
# Same extremum range check for the 4p Gaussian model, reusing the 5p
# machinery: a 4p fit has no "c", so c.i is deliberately set to
# par["d"], making (c.i - d.i) == 0 and the extremum land exactly at e.
fGauss4poutofrange <- function(fit, signalmin, signalmax)
{
  par <- coef(fit)
  b.i <- par["b"]
  c.i <- par["d"]   # intentional: 4p model is the 5p model with c == d
  d.i <- par["d"]
  e.i <- par["e"]
  f.i <- par["f"]
  xextr.i <- e.i + (c.i - d.i)*b.i / (f.i*sqrt(2*pi))
  yextr.i <- fGauss5p(xextr.i, b = b.i, c = c.i, d = d.i, e = e.i, f = f.i)
  outofrange <- (yextr.i > signalmax) | (yextr.i < signalmin)
}
# nls formula: 5-parameter log-Gauss-probit model (Gauss-probit on the
# log(dose) scale)
formLGauss5p <- as.formula(signal ~ f * exp(-0.5 * (log(dose/e)/b)^2) + d + (c - d) * pnorm(log(dose/e)/b))
# Starting values for fitting formLGauss5p with nls(): same heuristics
# as startvalGauss5pnls, but width and centre offsets are computed on
# the log(dose) scale (zero doses excluded from the width estimate).
startvalLGauss5pnls <- function(xm, ym, Ushape)
{
  d <- ym[1]
  c <- ym[which.max(xm)]
  yextremum <- ifelse(Ushape, min(ym), max(ym))
  f <- yextremum - (c + d) / 2
  b <- (log(max(xm)) - log(min(xm[xm!=0]))) / 4
  # dose at the extremum (median breaks ties)
  xextremum <- median(xm[which(ym == yextremum)])
  # multiplicative shift: analytic offset between the extremum and e
  e <- xextremum * exp(- (c - d)*b/(f*sqrt(2*pi)))
  startval <- list(b = b, c = c, d = d, e = e, f = f)
}
# nls formula: 4-parameter log-Gaussian model (no probit trend, c == d)
formLGauss4p <- as.formula(signal ~ f * exp(-0.5 * (log(dose/e)/b)^2) + d)
# Starting values for fitting formLGauss4p with nls(): log-scale width,
# bump height from the observed extremum, centre at the extremum dose.
startvalLGauss4pnls <- function(xm, ym, Ushape)
{
  d <- ym[1]
  yextremum <- ifelse(Ushape, min(ym), max(ym))
  f <- yextremum - d
  b <- (log(max(xm)) - log(min(xm[xm!=0]))) / 4
  # dose at the extremum (median breaks ties)
  xextremum <- median(xm[which(ym == yextremum)])
  e <- xextremum
  startval <- list(b = b, d = d, e = e, f = f)
}
# nls formula: log-probit dose-response model
formLprobit <- as.formula(signal ~ d + (c - d) * pnorm(log(dose/e)/b))
# Log-Gauss-probit 5-parameter model value at dose x: the Gauss-probit
# model evaluated on the log(dose) scale, centred at dose e.
fLGauss5p <- function(x, b, c, d, e, f)
{
  z <- log(x/e) / b
  f * exp(-0.5 * z^2) + d + (c - d) * pnorm(z)
}
# fLGauss5p (with bump height g) minus a benchmark threshold; the root
# of this function in x is the benchmark dose (BMD).
fLGauss5pBMR <- function(x, b, c, d, e, g, threshold)
{
  z <- log(x/e) / b
  g * exp(-0.5 * z^2) + d + (c - d) * pnorm(z) - threshold
}
# Same as fLGauss5pBMR but parameterized in log(dose), for root finding
# on the log scale: log(x/e) becomes xinlog - log(e).
fLGauss5pBMR_xinlog <- function(xinlog, b, c, d, e, g, threshold)
{
  z <- (xinlog - log(e)) / b
  g * exp(-0.5 * z^2) + d + (c - d) * pnorm(z) - threshold
}
# Log-probit model value at dose x: moves from d to c as log(x) crosses
# log(e), with scale parameter b.
fLprobit <- function(x, b, c, d, e)
{
  (c - d) * pnorm(log(x/e)/b) + d
}
# Inverse of fLprobit; signals beyond the asymptote c yield NaN.
invLprobit <- function(y, b, c, d, e)
{
  if (((d < c) & (y > c)) | ((d > c) & (y < c)))
    return(NaN)
  e * exp(b * qnorm((y - d) / (c - d)))
}
# Check whether a fitted log-Gauss-probit 5p model has its analytic
# extremum outside the admissible signal range [signalmin, signalmax].
fLGauss5poutofrange <- function(fit, signalmin, signalmax)
{
  par <- coef(fit)
  b.i <- par["b"]
  c.i <- par["c"]
  d.i <- par["d"]
  e.i <- par["e"]
  f.i <- par["f"]
  # dose of the model extremum on the log scale (closed-form root)
  xextr.i <- exp(log(e.i) + (c.i - d.i)*b.i/(f.i*sqrt(2*pi)))
  yextr.i <- fLGauss5p(xextr.i, b = b.i, c = c.i, d = d.i, e = e.i, f = f.i)
  outofrange <- (yextr.i > signalmax) | (yextr.i < signalmin)
}
# Same extremum range check for the 4p log-Gaussian model, reusing the
# 5p machinery: a 4p fit has no "c", so c.i is deliberately set to
# par["d"], making (c.i - d.i) == 0 and the extremum land exactly at e.
fLGauss4poutofrange <- function(fit, signalmin, signalmax)
{
  par <- coef(fit)
  b.i <- par["b"]
  c.i <- par["d"]   # intentional: 4p model is the 5p model with c == d
  d.i <- par["d"]
  e.i <- par["e"]
  f.i <- par["f"]
  xextr.i <- exp(log(e.i) + (c.i - d.i)*b.i/(f.i*sqrt(2*pi)))
  yextr.i <- fLGauss5p(xextr.i, b = b.i, c = c.i, d = d.i, e = e.i, f = f.i)
  outofrange <- (yextr.i > signalmax) | (yextr.i < signalmin)
}
# Radial power spectrum of a square image window, written into a shared
# environment (env) holding a row counter `i` and a result matrix
# `output` -- designed for use inside a focal/moving-window loop.
# Returns the (incremented) row counter; non-square windows are skipped.
rspectrum <- function(
  x,
  w,
  n = TRUE,
  env,
  ...
  ){
  # advance the shared row counter, whether or not we compute anything
  i <- get("i", envir = env)
  assign("i", i + 1, envir = env)
  if ( w == sqrt(length(x)) ){
    im <- matrix(x,w,w)
    # 2-D power spectrum of the window
    fftim <- Mod(stats::fft(im))^2
    offset <- ceiling(dim(im)[1]/2)
    # distance of each cell from the spectrum centre
    r <- sqrt((col(im)-offset)^2 + (row(im)-offset)^2)
    # average power within integer radius rings (reversed ordering)
    rspec <- rev(raster::zonal(
      raster::raster(fftim),
      raster::raster(r),
      fun = 'mean',
      na.rm = TRUE)[,2])
    if (n){
      # normalize by the window's standard deviation
      rspec <- rspec/stats::sd(im,na.rm=TRUE)
    }
    # drop the DC / lowest-frequency components
    rspec[1:2] <- 0
    output <- get("output", envir = env)
    # store at most the first 29 radial frequencies
    if( w/2 < 29){
      output[i,] <- rspec[1:(w/2)]
    }else{
      output[i,] <- rspec[1:29]
    }
    assign("output", output, envir = env)
    return(i)
  }else{
    return(i)
  }
}
# Test that a glmmTMB fit survives a save()/load() round trip: the
# printed summary must be identical before and after. Writes a
# temporary .Rdata file into the working directory and removes it.
stopifnot(require("testthat"),
          require("glmmTMB"))
context("Saving and loading glmmTMB objects")
test_that("summary consistency", {
    data(sleepstudy, package="lme4")
    fm1 <- glmmTMB(Reaction ~ Days + (1|Subject), sleepstudy)
    s1 <- capture.output(print(summary(fm1)))
    save(fm1, file="fm1.Rdata")
    load("fm1.Rdata")
    file.remove("fm1.Rdata")
    s2 <- capture.output(print(summary(fm1)))
    expect_identical(s1, s2)
})
# tinytest suite for Rcpp::as<T> conversions: each exported C++ helper
# (compiled from cpp/as.cpp) converts an R SEXP to the named C++ type;
# the tests cover REALSXP/INTSXP/RAWSXP/LGLSXP inputs for scalars,
# std::vector, std::deque and std::list targets.
if (Sys.getenv("RunAllRcppTests") != "yes") exit_file("Set 'RunAllRcppTests' to 'yes' to run.")
Rcpp::sourceCpp("cpp/as.cpp")
# scalar conversions
expect_equal( as_int(10), 10L, info = "as<int>( REALSXP ) " )
expect_equal( as_int(10L), 10L, info = "as<int>( INTSXP ) " )
expect_equal( as_int(as.raw(10L)), 10L, info = "as<int>( RAWSXP ) " )
expect_equal( as_int(TRUE), 1L, info = "as<int>( LGLSXP ) " )
expect_equal( as_double(10), 10.0, info = "as<double>( REALSXP ) " )
expect_equal( as_double(10L), 10.0, info = "as<double>( INTSXP ) " )
expect_equal( as_double(as.raw(10L)), 10.0, info = "as<double>( RAWSXP ) " )
expect_equal( as_double(TRUE), 1.0, info = "as<double>( LGLSXP ) " )
expect_equal( as_raw(10), as.raw(10), info = "as<Rbyte>( REALSXP ) " )
expect_equal( as_raw(10L), as.raw(10), info = "as<Rbyte>( INTSXP ) " )
expect_equal( as_raw(as.raw(10L)), as.raw(10), info = "as<Rbyte>( RAWSXP ) " )
expect_equal( as_raw(TRUE), as.raw(1), info = "as<Rbyte>( LGLSXP ) " )
expect_equal( as_bool(10), as.logical(10), info = "as<bool>( REALSXP ) " )
expect_equal( as_bool(10L), as.logical(10), info = "as<bool>( INTSXP ) " )
expect_equal( as_bool(as.raw(10L)), as.logical(10), info = "as<bool>( RAWSXP ) " )
expect_equal( as_bool(TRUE), as.logical(1), info = "as<bool>( LGLSXP ) " )
expect_equal( as_string("foo"), "foo", info = "as<string>( STRSXP ) " )
# std::vector conversions
expect_equal( as_vector_int(1:10), 1:10 , info = "as<vector<int>>( INTSXP ) " )
expect_equal( as_vector_int(as.numeric(1:10)), 1:10 , info = "as<vector<int>>( REALSXP ) " )
expect_equal( as_vector_int(as.raw(1:10)), 1:10 , info = "as<vector<int>>( RAWSXP ) " )
expect_equal( as_vector_int(c(TRUE,FALSE)), 1:0 , info = "as<vector<int>>( LGLSXP ) " )
expect_equal( as_vector_double(1:10), as.numeric(1:10) , info = "as<vector<double>>( INTSXP ) " )
expect_equal( as_vector_double(as.numeric(1:10)), as.numeric(1:10) , info = "as<vector<double>>( REALSXP ) " )
expect_equal( as_vector_double(as.raw(1:10)), as.numeric(1:10), info = "as<vector<double>>( RAWSXP ) " )
expect_equal( as_vector_double(c(TRUE,FALSE)), c(1.0, 0.0) , info = "as<vector<double>>( LGLSXP ) " )
expect_equal( as_vector_raw(1:10), as.raw(1:10) , info = "as<vector<Rbyte>>( INTSXP ) " )
expect_equal( as_vector_raw(as.numeric(1:10)), as.raw(1:10) , info = "as<vector<Rbyte>>( REALSXP ) " )
expect_equal( as_vector_raw(as.raw(1:10)), as.raw(1:10) , info = "as<vector<Rbyte>>( RAWSXP ) " )
expect_equal( as_vector_raw(c(TRUE,FALSE)), as.raw(1:0) , info = "as<vector<Rbyte>>( LGLSXP ) " )
expect_equal( as_vector_bool(0:10), as.logical(0:10) , info = "as<vector<bool>>( INTSXP ) " )
expect_equal( as_vector_bool(as.numeric(0:10)), as.logical(0:10) , info = "as<vector<bool>>( REALSXP ) " )
expect_equal( as_vector_bool(as.raw(0:10)), as.logical(0:10) , info = "as<vector<bool>>( RAWSXP ) " )
expect_equal( as_vector_bool(c(TRUE,FALSE)), as.logical(1:0) , info = "as<vector<bool>>( LGLSXP ) " )
expect_equal( as_vector_string(letters), letters , info = "as<vector<string>>( STRSXP ) " )
# other STL container targets
expect_equal( as_deque_int(1:10), 1:10 , info = "as<deque<int>>( INTSXP ) " )
expect_equal( as_list_int(1:10), 1:10 , info = "as<list<int>>( INTSXP ) " )
# Generic fault-detection dispatcher: dispatches on the class of
# `threshold_object` (see the "threshold" method defined below).
# `observation` is the new (projected-space) observation to test; extra
# arguments are forwarded to the selected method.
faultDetect <- function(threshold_object, observation, ...){
UseMethod("faultDetect")
}
# Fault-detection method for "threshold" objects.
#
# Projects `observation` onto the reduced subspace stored in
# `threshold_object$projectionMatrix`, then computes two monitoring
# statistics and compares each against its stored threshold:
#   * SPE -- squared prediction error (residual distance off the subspace);
#   * T2  -- quadratic form of the projected scores weighted by
#            `threshold_object$LambdaInv`.
#
# Returns a 1-row matrix with columns SPE, SPE_Flag, T2, T2_Flag, where the
# *_Flag entries are 1 when the statistic exceeds its threshold, else 0.
faultDetect.threshold <- function(threshold_object, observation, ...){
  P <- threshold_object$projectionMatrix
  # Scores of the observation in the reduced space.
  scores <- observation %*% P
  # Residual of the reconstruction back in the original space.
  residual <- observation - scores %*% t(P)
  SPE <- diag(residual %*% t(residual))
  T2 <- diag(scores %*% threshold_object$LambdaInv %*% t(scores))
  flags <- matrix(
    c(SPE,
      as.numeric(SPE > threshold_object$SPE_threshold),
      T2,
      as.numeric(T2 > threshold_object$T2_threshold)),
    nrow = 1
  )
  colnames(flags) <- c("SPE", "SPE_Flag", "T2", "T2_Flag")
  flags
}
# Tests for tidyjson::read_json: format inference from the file extension,
# explicit format selection, and error behaviour on bad/uninferrable formats.
context("read_json")
# A ".json" file should be read as a single document (document.id == 1)
# whose top-level array gathers into 8 rows.
test_that("read_json correctly infers a .json file", {
json_path <- system.file("extdata", "sample.json", package="tidyjson")
json <- read_json(json_path)
expect_true(json %>% is.tbl_json)
expect_identical(
json %>% gather_array(),
tbl_json(
dplyr::tibble(document.id = 1L, array.index = 1L:8L),
json_get(json)[[1]])
)
})
# A ".jsonl" file (one JSON value per line) should yield one document per line.
test_that("read_json correctly infers a .jsonl file", {
json_path <- system.file("extdata", "sample.jsonl", package="tidyjson")
json <- read_json(json_path)
expect_true(json %>% is.tbl_json)
expect_identical(
json,
tbl_json(
dplyr::tibble(document.id = 1:8),
json_get(json)
)
)
})
# Forcing format="json" on a jsonl file must fail (multiple top-level values),
# and an unknown format name must raise the dedicated error message.
test_that("read_json does not allow incorrect formats", {
json_path <- system.file("extdata", "sample.jsonl", package="tidyjson")
expect_error(
read_json(json_path, format = "json")
)
expect_error(
read_json(json_path, format = "json123"),
regexp="Unrecognized json format: json123")
})
# A file without a recognized extension cannot be inferred, explicitly or not.
test_that("read_json fails if it cannot infer format", {
json_path <- system.file("extdata", "sample_jsonl", package="tidyjson")
expect_error(
read_json(json_path)
)
expect_error(
read_json(json_path, format = "infer")
)
})
# An explicit format must override (missing) extension-based inference.
test_that("read_json uses given format", {
json_path <- system.file("extdata", "sample_jsonl", package="tidyjson")
json <- read_json(json_path, format="jsonl")
expect_true(json %>% is.tbl_json)
expect_identical(
json,
tbl_json(
dplyr::tibble(document.id = 1:8),
json_get(json)
)
)
})
# Display a similarity matrix W as a heatmap with samples ordered by their
# cluster assignment in `group`, so members of each cluster are contiguous.
#
# ColSideColors: optional per-sample colours; a vector is passed to
# stats::heatmap, a matrix to heatmapPlus (which supports multiple colour
# bars).  Extra arguments are forwarded to the plotting function.
# Returns NULL (called for its plotting side effect).
displayClustersWithHeatmap <- function (W, group, ColSideColors=NULL,...) {
  # Order samples so that identical cluster labels are adjacent.
  ord <- sort(as.vector(group), index.return = TRUE)$ix
  # Replace the self-similarity diagonal with the overall median, then
  # row-normalise and symmetrise the matrix for display.
  diag(W) <- median(as.vector(W))
  W <- W / rowSums(W)
  W <- W + t(W)
  if (is.null(ColSideColors)) {
    heatmap(W[ord, ord], scale = "none", Rowv = NA, Colv = NA, ...)
  } else if (is.vector(ColSideColors)) {
    heatmap(W[ord, ord], scale = "none", Rowv = NA, Colv = NA,
            ColSideColors = ColSideColors[ord], ...)
  } else {
    heatmapPlus(W[ord, ord], scale = "none", Rowv = NA, Colv = NA,
                ColSideColors = ColSideColors[ord, ], ...)
  }
  return()
}
# fasstr vignette code: a tour of the package's summary/plot functions for
# HYDAT station 08NM116, 1990-2001.  Chunks only evaluate when the
# "hydat_eval" environment variable is set (HYDAT database required).
knitr::opts_chunk$set(eval = nzchar(Sys.getenv("hydat_eval")),
warning = FALSE,
message = FALSE)
library(fasstr)
# --- Raw data and data preparation -----------------------------------------
plot_flow_data(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(fill_missing_dates(station_number = "08NM116") %>%
add_date_variables() %>%
add_rolling_means() %>%
add_basin_area() %>%
dplyr::filter(WaterYear >= 1990, WaterYear <= 2001)
))
# --- Data screening ---------------------------------------------------------
head(as.data.frame(screen_flow_data(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_data_screening(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_missing_dates(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
# --- Long-term (period-of-record) statistics --------------------------------
head(as.data.frame(calc_longterm_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001,
percentiles = seq(5, 95, by = 5),
transpose = TRUE)))
plot_longterm_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_longterm_daily_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001,
percentiles = 1:99,
transpose = TRUE)))
plot_longterm_daily_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_flow_duration(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
# --- Annual statistics ------------------------------------------------------
head(as.data.frame(calc_annual_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001,
include_seasons = TRUE)))
head(as.data.frame(calc_annual_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001,
include_seasons = TRUE,
use_yield = TRUE)))
head(as.data.frame(calc_annual_outside_normal(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_annual_outside_normal(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_annual_flow_timing(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_annual_flow_timing(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_annual_lowflows(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_annual_lowflows(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_annual_lowflows(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[2]]
plot_annual_means(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_annual_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_annual_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
# Cumulative totals/yields, with and without seasonal breakdowns; the list
# index selects which of the returned plots to render.
plot_annual_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_annual_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001,use_yield = TRUE)[[1]]
plot_annual_cumulative_stats(station_number = "08NM116", include_seasons = TRUE,
start_year = 1990, end_year = 2001)[[3]]
plot_annual_cumulative_stats(station_number = "08NM116", include_seasons = TRUE,
start_year = 1990, end_year = 2001,use_yield = TRUE)[[3]]
plot_annual_cumulative_stats(station_number = "08NM116", include_seasons = TRUE,
start_year = 1990, end_year = 2001)[[2]]
plot_annual_cumulative_stats(station_number = "08NM116", include_seasons = TRUE,
start_year = 1990, end_year = 2001,use_yield = TRUE)[[2]]
# --- Monthly statistics -----------------------------------------------------
head(as.data.frame(calc_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[3]]
plot_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[2]]
plot_monthly_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[4]]
head(as.data.frame(calc_monthly_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_monthly_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_monthly_cumulative_stats(station_number = "08NM116", use_yield = TRUE,
start_year = 1990, end_year = 2001)))
plot_monthly_cumulative_stats(station_number = "08NM116", use_yield = TRUE,
start_year = 1990, end_year = 2001)[[1]]
# --- Daily statistics -------------------------------------------------------
head(as.data.frame(calc_daily_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_daily_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_daily_stats(station_number = "08NM116", add_year = 1990,
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_daily_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)))
plot_daily_cumulative_stats(station_number = "08NM116",
start_year = 1990, end_year = 2001)[[1]]
plot_daily_cumulative_stats(station_number = "08NM116", add_year = 1990,
start_year = 1990, end_year = 2001)[[1]]
head(as.data.frame(calc_daily_cumulative_stats(station_number = "08NM116", use_yield = TRUE,
start_year = 1990, end_year = 2001)))
plot_daily_cumulative_stats(station_number = "08NM116", use_yield = TRUE,
start_year = 1990, end_year = 2001)[[1]]
plot_daily_cumulative_stats(station_number = "08NM116", add_year = 1990, use_yield = TRUE,
start_year = 1990, end_year = 2001)[[1]]
# --- Trends and frequency analyses ------------------------------------------
trends <- compute_annual_trends(station_number = "08NM116", zyp_method = "zhang", zyp_alpha = 0.05,
start_year = 1990, end_year = 2001)
head(as.data.frame(trends[[1]]))
head(as.data.frame(trends[[2]]))
trends[[51]]
freq <- compute_annual_frequencies(station_number = "08NM116",
start_year = 1990, end_year = 2001)
head(as.data.frame(freq[[1]]))
head(as.data.frame(freq[[2]]))
freq[[3]]
head(as.data.frame(freq[[5]]))
# dreamer example: simulate dose-response data from a linear model, fit a
# Bayesian model-averaging MCMC over linear and quadratic candidate models,
# and summarise the posterior.  Requires JAGS via the dreamer package.
set.seed(888)
# Simulated data: 3 cohorts of 20 subjects at doses 0, 3, 10 with a linear
# mean (intercept b1 = 1, slope b2 = 3) and residual sd 5.
data <- dreamer_data_linear(
n_cohorts = c(20, 20, 20),
dose = c(0, 3, 10),
b1 = 1,
b2 = 3,
sigma = 5
)
# Fit both candidate models; w_prior gives each model prior weight 1/2.
output <- dreamer_mcmc(
data = data,
n_adapt = 1e3,
n_burn = 1e3,
n_iter = 1e4,
n_chains = 2,
silent = FALSE,
mod_linear = model_linear(
mu_b1 = 0,
sigma_b1 = 1,
mu_b2 = 0,
sigma_b2 = 1,
shape = 1,
rate = .001,
w_prior = 1 / 2
),
mod_quad = model_quad(
mu_b1 = 0,
sigma_b1 = 1,
mu_b2 = 0,
sigma_b2 = 1,
mu_b3 = 0,
sigma_b3 = 1,
shape = 1,
rate = .001,
w_prior = 1 / 2
)
)
# Model-averaged posterior summary, raw posterior samples, a single model's
# posterior, and the posterior of differences from the dose-0 reference.
posterior(output)
post <- posterior(output, return_samples = TRUE)
head(post$samps)
posterior(output$mod_quad)
posterior(output, reference_dose = 0)
# Semantic coherence of STM topics.
#
# For each topic, semCoh1beta() sums log co-occurrence ratios of the topic's
# M most probable words across documents.  When the model has content
# covariates (several logbeta matrices), each covariate level is scored on
# its own documents and the scores are combined as a document-count-weighted
# average.
#
# Arguments:
#   model     an STM model object
#   documents the documents used to fit the model (any format accepted by
#             asSTMCorpus)
#   M         number of top words per topic
# Returns a numeric vector with one coherence score per topic.
semanticCoherence <- function(model, documents, M=10){
  if(!inherits(model, "STM")) stop("model must be an STM object")
  args <- asSTMCorpus(documents)
  documents <- args$documents
  if(length(model$beta$logbeta)!=1) {
    # Aspect model: weighted average over covariate levels.
    result <- 0
    for(i in 1:length(model$beta$logbeta)){
      subset <- which(model$settings$covariates$betaindex==i)
      triplet <- doc.to.ijv(documents[subset])
      mat <- slam::simple_triplet_matrix(triplet$i, triplet$j,triplet$v, ncol=model$settings$dim$V)
      result <- result + semCoh1beta(mat, M, beta=model$beta$logbeta[[i]])*length(subset)
    }
    return(result/length(documents))
  }
  else {
    # Single beta: score directly.  (A redundant top-words computation that
    # duplicated work done inside semCoh1beta() has been removed.)
    beta <- model$beta$logbeta[[1]]
    triplet <- doc.to.ijv(documents)
    mat <- slam::simple_triplet_matrix(triplet$i, triplet$j,triplet$v, ncol=model$settings$dim$V)
    result <- semCoh1beta(mat, M, beta=beta)
    return(result)
  }
}
# Core coherence computation for one log-beta matrix.
#
# mat:  documents-by-vocab counts as a slam::simple_triplet_matrix
# M:    number of top words per topic
# beta: K x V matrix of log word probabilities (rows = topics)
#
# For each topic the score is sum over ordered top-word pairs (m, l) with
# m ranked above l of log((D(m,l) + .01) / (D(l,l) + .01)), where D counts
# documents containing the word(s) -- the UMass coherence form.
semCoh1beta <- function(mat, M, beta){
# M most probable words per topic (columns of top.words index topics).
top.words <- apply(beta, 1, order, decreasing=TRUE)[1:M,]
wordlist <- unique(as.vector(top.words))
mat <- mat[,wordlist]
# Binarise counts: D(m,l) should count documents, not token occurrences.
mat$v <- ifelse(mat$v>1, 1,mat$v)
# cross[m,l] = number of documents containing both word m and word l;
# the diagonal holds single-word document frequencies.
cross <- slam::tcrossprod_simple_triplet_matrix(t(mat))
# Re-index top words into the reduced wordlist and split per topic.
temp <- match(as.vector(top.words),wordlist)
labels <- split(temp, rep(1:nrow(beta), each=M))
sem <- function(ml,cross) {
m <- ml[1]; l <- ml[2]
# .01 smoothing avoids log(0) for never-co-occurring pairs.
log(.01 + cross[m,l]) - log(cross[l,l] + .01)
}
result <- vector(length=nrow(beta))
for(k in 1:nrow(beta)) {
grid <- expand.grid(labels[[k]],labels[[k]])
colnames(grid) <- c("m", "l")
# Keep each unordered pair once (m ranked strictly above l).
grid <- grid[grid$m > grid$l,]
calc <- apply(grid,1,sem,cross)
result[k] <- sum(calc)
}
return(result)
}
# Column-wise skewness of a numeric matrix, computed with Rfast primitives.
#
# The statistic is n * m3 / ((n-1)(n-2) * (s2/(n-1))^1.5), i.e. the ratio of
# the third central-moment sum to the 1.5 power of the (n-1)-denominator
# second moment, with small-sample scaling.  If pvalue = TRUE, each squared
# skewness is divided by its large-sample variance and referred to a
# chi-square(1) distribution; a two-column matrix (skewness, p-value) is
# returned, otherwise a plain numeric vector.
colskewness <- function(x, pvalue = FALSE) {
  n <- dim(x)[1]
  nm1 <- n - 1
  # Centre every column by its mean.
  centred <- Rfast::eachrow(x, Rfast::colmeans(x), oper = "-")
  numer <- n * Rfast::colsums(centred^3)
  denom <- (Rfast::colsums(centred^2) / nm1)^1.5
  skewness <- numer / (nm1 * (n - 2) * denom)
  if (pvalue) {
    # Asymptotic variance of the sample skewness under normality.
    vars <- 6 * n * nm1 / ((n - 2) * (n + 1) * (n + 3))
    pval <- pchisq(skewness^2 / vars, 1, lower.tail = FALSE)
    skewness <- cbind(skewness, pval)
    colnames(skewness) <- c("skewness", "p-value")
  }
  skewness
}
# EM algorithm for a multivariate linear mixed model with missing responses
# (unstructured random-effect covariance psi).  The numerical work is done by
# the Fortran routine "mlmmem2"; this wrapper builds the missingness-pattern
# bookkeeping, packs all inputs into flat integer/double buffers, calls
# Fortran once, and unpacks the results.
#
# Arguments:
#   y      ntot x r response matrix; NAs mark missing values
#   subj   subject index for each row of y
#   pred   matrix of predictors (NAs not allowed)
#   xcol   columns of pred used as fixed-effect covariates
#   zcol   columns of pred used as random-effect covariates
#   start  optional list(beta, sigma, psi) of starting values
#   maxits maximum number of EM iterations
#   eps    convergence criterion
# Returns a list with estimates (beta, sigma, psi), empirical Bayes effects,
# convergence info, pattern bookkeeping and log-likelihood traces.
mlmmm.em<-function(y,subj,pred,xcol,zcol,start,maxits=200,eps=.0001){
if(any(is.na(pred)))
stop("missing values in pred not allowed")
if(is.vector(y)) y<-matrix(y,ncol=1)
if(is.vector(pred)) pred<-matrix(pred,ncol=1)
m<-as.integer(length(table(subj)))
ntot<-as.integer(nrow(y))
nmax<-as.integer(max(table(subj)))
r<-as.integer(ncol(y))
p<-length(xcol)
q<-length(zcol)
ggs<-as.integer(round(((q*r)*((q*r)+1)/2)+r*(r+1)/2))
zcol<-as.integer(zcol)
xcol<-as.integer(xcol)
pcol<-as.integer(ncol(pred))
{if(missing(start)){
beta<-matrix(0,p,r)
sigma<-matrix(0,r,r)
psi<-matrix(0,q*r,q*r)
epsi<-matrix(0,ntot,r)
sflag<-as.integer(0)}
else{
beta<-start$beta
sigma<-start$sigma
psi<-start$psi
epsi<-matrix(0,ntot,r)
sflag<-as.integer(1)
storage.mode(eps)<-"double"
storage.mode(beta)<-"double"
storage.mode(sigma)<-"double"
storage.mode(psi)<-"double"}}
# NOTE(review): in the original source this cat() call was an unterminated
# string literal (`cat("`), which swallowed the following statements and made
# the function unparseable.  The message has been reconstructed to mirror the
# intact one in mlmmmbd.em -- confirm wording against the upstream source.
cat("performing EM in mlmm with NA values")
now<-proc.time()
# Build response-pattern codes: one character per response column (1 =
# observed, 0 = missing); fully-missing rows get the sentinel pattern of 2s.
rmat<-1-1*is.na(y)
storage.mode(rmat)<-"integer"
revcpatt<-rep("",ntot)
for(i in 1:r) revcpatt<-paste(as.character(rmat[,i]),revcpatt,sep="")
nulpat0<-""
nulpat2<-""
for(i in 1:r){
nulpat0<-paste(nulpat0,"0",sep="")
nulpat2<-paste(nulpat2,"2",sep="")}
revcpatt[revcpatt==nulpat0]<-nulpat2
tmp<-rev(table(revcpatt))
npatt<-length(tmp)
if(any(revcpatt==nulpat2)) npatt<-npatt-1
ww<-!duplicated(revcpatt)
upatt<-revcpatt[ww]
rmat<-rmat[ww,]
if(r==1) rmat<-matrix(rmat,ncol=1)
ww<-rev(order(upatt))
upatt<-upatt[ww]
rmat<-matrix(rmat,ncol=r,nrow=length(rev(order(upatt))))
rmat<-rmat[ww,]
if(r==1) rmat<-matrix(rmat,ncol=1)
if(any(upatt==nulpat2)){
rmat<-rmat[-1,]
upatt<-upatt[-1]}
patt<-integer(ntot)
patt[revcpatt==nulpat2]<-0
for(i in 1:npatt) patt[revcpatt==upatt[i]]<-i
storage.mode(npatt)<-"integer"
storage.mode(rmat)<-"integer"
storage.mode(patt)<-"integer"
# Positions of rows grouped by pattern, and first/last index per pattern.
iposn<-as.integer(1:ntot)
ww<-order(patt)
iposn<-iposn[ww]
pstfin<-matrix(0,npatt,2)
{if(any(patt==0)){
sst<-tmp[1]+1
for(i in 2:(npatt+1)){
pstfin[i-1,1]<-sst
pstfin[i-1,2]<-sst+tmp[i]-1
sst<-sst+tmp[i]}}
else{
sst<-1
for(i in 1:npatt){
pstfin[i,1]<-sst
pstfin[i,2]<-sst+tmp[i]-1
sst<-sst+tmp[i]}}}
storage.mode(pstfin)<-"integer"
storage.mode(y)<-"double"
# Fortran cannot carry NAs; missing cells get a sentinel value.
y[is.na(y)]<--999.99
storage.mode(pred)<-"double"
# Single call into Fortran: all integer inputs are concatenated into
# intinput, all double inputs into dbinput; workspace arrays follow.
tmp<-.Fortran("mlmmem2",
intinput= as.integer(c(ntot,
m,
r,
p,
q,
subj,
nmax,
iposn,
npatt,
pstfin,
patt,
rmat,
pcol,
xcol,
zcol,
maxits,
ggs,
sflag)),
intoutpt= integer(4+3*m),
dbinput= as.double(c(pred,
y,
sigma,
beta,
psi,
eps,
epsi)),
dboutput= numeric(r*nmax*r*nmax*10),
w=array(0,c(r*nmax,r*nmax,m)),
wkqb2=matrix(0,nmax,r),
vdel=numeric(r*nmax),
uszxb=numeric(r*q),usotzo=matrix(0,r*q,r*nmax),
usotzm=matrix(0,r*q,r*nmax),wxbw=numeric(r*nmax),
wxbwo=numeric(r*nmax),wxbwm=numeric(r*nmax),
wkeb2=matrix(0,r*q,r*nmax),eb=matrix(0,r*q,m),
wxbeta=matrix(0,ntot,r),wxbetazeb=matrix(0,ntot,r),
varb=array(0,c(r*q,r*q,m)),wkrrpt=array(0,c(r,r,npatt)),
wkrrb21=array(0,c(r,r,npatt)),
eystar=matrix(0,ntot,r),ey=matrix(0,ntot,r),
u=array(0,c(r*q,r*q,m)),
ztz=array(0,c(q,q,m)),
xtw=matrix(0,p*r,nmax*r),xtwx=matrix(0,p*r,p*r),
xtwy=numeric(p*r),xtwxinv=matrix(0,p*r,p*r),
wkqq1=matrix(0,r*q,r*q),wkqq2=matrix(0,r*q,r*q),
wkqq3=matrix(0,r*q,r*q),wkrr1=matrix(0,r,r),
wkrr2=matrix(0,r,r),wksigtz=array(0,c(r*q,r*nmax,m)),
wkqqu=array(0,c(r*q,r*q,m)),
wkqnm=array(0,c(r*q,r*nmax,m)),
obeta=matrix(0,p,r),
osigma=matrix(0,r,r),opsi=array(0,c(r*q,r*q)),
llvec=numeric(as.integer(maxits)),
llovec=numeric(as.integer(maxits)),
wkg=rep(0,ggs),wkgg=matrix(0,ggs,ggs),wkpr=matrix(0,p,r),
wkpp=matrix(0,p,p),xtxinv=matrix(0,p,p))
# Unpack the (possibly modified) integer inputs from the flat buffer.
in1 <- 1
in2 <- in1 + 1
in3 <- in2 + 1
in4 <- in3 + 1
in5 <- in4 + 1
ntot <- tmp$intinput[in1]
m <- tmp$intinput[in2]
r <- tmp$intinput[in3]
p <- tmp$intinput[in4]
q <- tmp$intinput[in5]
in6 <- in5 + 1
in7 <- in6 + ntot
in8 <- in7 + 1
in9 <- in8 + ntot
in10 <- in9 + 1
in11 <- in10 + 2*npatt
in12 <- in11 + ntot
in13 <- in12 + r*npatt
in14 <- in13 + 1
in15 <- in14 + p
in16 <- in15 + q
in17 <- in16 + 1
in18 <- in17 + 1
subj <- tmp$intinput[in6:(in7-1)]
nmax <- tmp$intinput[in7]
iposn <- tmp$intinput[in8:(in9-1)]
npatt <- tmp$intinput[in9]
pstfin <- matrix(tmp$intinput[in10:(in11-1)],nrow=npatt)
patt <- tmp$intinput[in11:(in12-1)]
rmat <- matrix(tmp$intinput[in12:(in13-1)],nrow=npatt)
pcol <- tmp$intinput[in13]
xcol <- tmp$intinput[in14:(in15-1)]
zcol <- tmp$intinput[in15:(in16-1)]
maxits <- tmp$intinput[in16]
ggs <- tmp$intinput[in17]
sflag <- tmp$intinput[in18]
# Unpack the double inputs (estimates updated in place by Fortran).
isub0 <- r*q
idi1 <- 1
idi2 <- idi1 + ntot*pcol
idi3 <- idi2 + r*ntot
idi4 <- idi3 + r*r
idi5 <- idi4 + p*r
idi6 <- idi5 + isub0*isub0
idi7 <- idi6 + 1
pred <- matrix(tmp$dbinput[idi1:(idi2-1)],nrow= ntot)
y <- matrix(tmp$dbinput[idi2:(idi3-1)],nrow= ntot)
sigma <- matrix(tmp$dbinput[idi3:(idi4-1)],nrow=r)
beta <- matrix(tmp$dbinput[idi4:(idi5-1)],nrow= p)
psi <- array(tmp$dbinput[idi5:(idi6-1)],dim=c(isub0,isub0))
eps <- tmp$dbinput[idi6]
epsi <- matrix(tmp$dbinput[idi7:(idi7+ntot*r-1)],nrow= ntot)
# Unpack integer outputs: per-subject row ranges, sizes, iteration count,
# message code and convergence flag.
io1 <- 1
io2 <- io1 + m
io3 <- io2 + m
io4 <- io3 + 1
io5 <- io4 + m
io6 <- io5 + 1
io7 <- io6 + 1
ist <- tmp$intoutpt[io1]
ifin <- tmp$intoutpt[io2:(io3-1)]
nstar <- tmp$intoutpt[io3]
nstari <- tmp$intoutpt[io4:(io5-1)]
iter <- tmp$intoutpt[io5]
msg <- tmp$intoutpt[io6]
cvgd <- tmp$intoutpt[io7]
# Unpack double output workspaces (10 stacked (r*nmax)^2 matrices).
isub1 <- r*nmax
isub <- isub1*isub1
ido1 <- 1
ido2 <- ido1 + isub
ido3 <- ido2 + isub
ido4 <- ido3 + isub
ido5 <- ido4 + isub
ido6 <- ido5 + isub
ido7 <- ido6 + isub
ido8 <- ido7 + isub
ido9 <- ido8 + isub
ido10 <- ido9 + isub
wo <- matrix(tmp$dboutput[ido1:(ido2-1)],nrow= isub1)
wo1 <- matrix(tmp$dboutput[ido2:(ido3-1)],nrow= isub1)
wm <- matrix(tmp$dboutput[ido3:(ido4-1)],nrow= isub1)
wom <- matrix(tmp$dboutput[ido4:(ido5-1)],nrow= isub1)
wkwmm1 <- matrix(tmp$dboutput[ido5:(ido6-1)],nrow= isub1)
wkwmm2 <- matrix(tmp$dboutput[ido6:(ido7-1)],nrow= isub1)
eyyt <- matrix(tmp$dboutput[ido7:(ido8-1)],nrow= isub1)
eyxyxt <- matrix(tmp$dboutput[ido8:(ido9-1)],nrow= isub1)
wkeyxyxt <- matrix(tmp$dboutput[ido9:(ido10-1)],nrow= isub1)
wkqnm1 <- matrix(tmp$dboutput[ido10:(ido10+isub-1)],nrow= isub1)
clock<-proc.time()-now
cat("\n")
# Translate the Fortran message code into a user-facing warning.
{if(msg==1)
warning("xtx is not full rank, failed for calculating beta<-(0)")
else if(msg==2)
warning("Value of psi or sigma or U<-i
became non-pos.def.during iterations")
else if(msg==3)
warning("Value of Var(y<-i(obs)) became non-pos.def.during iterations")
else if(msg==4)
warning("GLS failed for start vals, xtwx not full rank")
else if(msg==5)
warning("Value of psi became non-pos.def. during iterations")
else if(msg==6)
warning("Value of sigma became non-pos.def. during iterations")
else if(msg==7)
warning("log-density not concave at one or more scoring steps")}
llvec<-tmp$llvec[1:iter]
llovec<-tmp$llovec[1:iter]
converged<-cvgd==as.integer(1)
if(!converged) warning(paste("did not converge by",
format(iter),"iterations"))
list(beta=beta,sigma=sigma,psi=psi,eb=tmp$eb,varb=tmp$varb,xtwxinv=tmp$xtwxinv,
converged=converged,iter=iter,npatt=npatt,pstfin=pstfin,iposn=iposn,patt=patt,rmat=rmat,
logll=llvec,logoll=llovec,clock=clock)}
# Block-diagonal variant of mlmmm.em: EM for the multivariate linear mixed
# model with missing responses, with the random-effect covariance psi
# constrained to one q x q block per response (psi is a q x q x r array).
# The numerical work is done by the Fortran routine "mlmmembd2"; this wrapper
# packs the inputs into flat buffers, calls Fortran once, and unpacks the
# results.  Arguments and return value parallel mlmmm.em.
mlmmmbd.em<-function(y,subj,pred,xcol,zcol,start,maxits=100,eps=.01){
if(any(is.na(pred)))
stop("missing values in pred not allowed")
if(is.vector(y)) y<-matrix(y,ncol=1)
if(is.vector(pred)) pred<-matrix(pred,ncol=1)
m<-as.integer(length(table(subj)))
ntot<-as.integer(nrow(y))
nmax<-as.integer(max(table(subj)))
r<-as.integer(ncol(y))
p<-length(xcol)
q<-length(zcol)
# Size of the scoring workspace: r blocks of q(q+1)/2 plus r(r+1)/2 for sigma.
ggs<-as.integer(round((r*(q*(q+1))/2)+r*(r+1)/2))
zcol<-as.integer(zcol)
xcol<-as.integer(xcol)
pcol<-as.integer(ncol(pred))
{if(missing(start)){
beta<-matrix(0,p,r)
sigma<-matrix(0,r,r)
psi<-array(0,c(q,q,r))
epsi<-matrix(0,ntot,r)
sflag<-as.integer(0)}
else{
beta<-start$beta
sigma<-start$sigma
psi<-start$psi
epsi<-matrix(0,ntot,r)
sflag<-as.integer(1)
storage.mode(psi)<-"double"
storage.mode(beta)<-"double"
storage.mode(sigma)<-"double"}}
cat("performing block-diagonal version of EM in mlmm with NA values")
now<-proc.time()
# Build response-pattern codes: one character per response column (1 =
# observed, 0 = missing); fully-missing rows get the sentinel pattern of 2s.
rmat<-1-1*is.na(y)
storage.mode(rmat)<-"integer"
revcpatt<-rep("",ntot)
for(i in 1:r) revcpatt<-paste(as.character(rmat[,i]),revcpatt,sep="")
nulpat0<-""
nulpat2<-""
for(i in 1:r){
nulpat0<-paste(nulpat0,"0",sep="")
nulpat2<-paste(nulpat2,"2",sep="")}
revcpatt[revcpatt==nulpat0]<-nulpat2
tmp<-rev(table(revcpatt))
npatt<-length(tmp)
if(any(revcpatt==nulpat2)) npatt<-npatt-1
ww<-!duplicated(revcpatt)
upatt<-revcpatt[ww]
rmat<-rmat[ww,]
if(r==1) rmat<-matrix(rmat,ncol=1)
ww<-rev(order(upatt))
upatt<-upatt[ww]
rmat<-matrix(rmat,ncol=r,nrow=length(rev(order(upatt))))
rmat<-rmat[ww,]
if(r==1) rmat<-matrix(rmat,ncol=1)
if(any(upatt==nulpat2)){
rmat<-rmat[-1,]
upatt<-upatt[-1]}
patt<-integer(ntot)
patt[revcpatt==nulpat2]<-0
for(i in 1:npatt) patt[revcpatt==upatt[i]]<-i
storage.mode(npatt)<-"integer"
storage.mode(rmat)<-"integer"
storage.mode(patt)<-"integer"
# Positions of rows grouped by pattern, and first/last index per pattern.
iposn<-as.integer(1:ntot)
ww<-order(patt)
iposn<-iposn[ww]
pstfin<-matrix(0,npatt,2)
{if(any(patt==0)){
sst<-tmp[1]+1
for(i in 2:(npatt+1)){
pstfin[i-1,1]<-sst
pstfin[i-1,2]<-sst+tmp[i]-1
sst<-sst+tmp[i]}}
else{
sst<-1
for(i in 1:npatt){
pstfin[i,1]<-sst
pstfin[i,2]<-sst+tmp[i]-1
sst<-sst+tmp[i]}}}
storage.mode(pstfin)<-"integer"
storage.mode(y)<-"double"
# Fortran cannot carry NAs; missing cells get a sentinel value.
y[is.na(y)]<--999.99
storage.mode(pred)<-"double"
# Single call into Fortran: all integer inputs concatenated into intinput,
# all double inputs into dbinput; workspace arrays follow.
tmp<-.Fortran("mlmmembd2",
intinput= as.integer(c(ntot,
m,
r,
p,
q,
subj,
nmax,
iposn,
npatt,
pstfin,
patt,
rmat,
pcol,
xcol,
zcol,
maxits,
ggs,
sflag)),
intoutpt= integer(4+3*m),
dbinput= as.double(c(pred,
y,
sigma,
beta,
psi,
eps,
epsi)),
dboutput= numeric(r*nmax*r*nmax*10),
w=array(0,c(r*nmax,r*nmax,m)),
wkqb2=matrix(0,nmax,r),
vdel=numeric(r*nmax),
uszxb=numeric(r*q),usotzo=matrix(0,r*q,r*nmax),
usotzm=matrix(0,r*q,r*nmax),wxbw=numeric(r*nmax),
wxbwo=numeric(r*nmax),wxbwm=numeric(r*nmax),
wkeb2=matrix(0,r*q,r*nmax),eb=matrix(0,r*q,m),
wxbeta=matrix(0,ntot,r),wxbetazeb=matrix(0,ntot,r),
varb=array(0,c(r*q,r*q,m)),wkrrpt=array(0,c(r,r,npatt)),
wkrrb21=array(0,c(r,r,npatt)),
eystar=matrix(0,ntot,r),ey=matrix(0,ntot,r),
u=array(0,c(r*q,r*q,m)),
ztz=array(0,c(q,q,m)),
xtw=matrix(0,p*r,nmax*r),xtwx=matrix(0,p*r,p*r),
xtwy=numeric(p*r),xtwxinv=matrix(0,p*r,p*r),
wkrqrq1=matrix(0,r*q,r*q),wkrqrq2=matrix(0,r*q,r*q),
wkqq1bd=array(0,c(q,q,r)),wkqq2bd=matrix(0,q,q),
wkqq3=matrix(0,r*q,r*q),wkrr1=matrix(0,r,r),
wkrr2=matrix(0,r,r),wksigtz=array(0,c(r*q,r*nmax,m)),
wkqqu=array(0,c(r*q,r*q,m)),
wkqnm=array(0,c(r*q,r*nmax,m)),
obeta=matrix(0,p,r),
osigma=matrix(0,r,r),opsi=array(0,c(q,q,r)),
llvec=numeric(as.integer(maxits)),
llovec=numeric(as.integer(maxits)),
wkg=rep(0,ggs),wkgg=matrix(0,ggs,ggs),wkpr=matrix(0,p,r),
wkpp=matrix(0,p,p),xtxinv=matrix(0,p,p))
# Unpack the (possibly modified) integer inputs from the flat buffer.
in1 <- 1
in2 <- in1 + 1
in3 <- in2 + 1
in4 <- in3 + 1
in5 <- in4 + 1
ntot <- tmp$intinput[in1]
m <- tmp$intinput[in2]
r <- tmp$intinput[in3]
p <- tmp$intinput[in4]
q <- tmp$intinput[in5]
in6 <- in5 + 1
in7 <- in6 + ntot
in8 <- in7 + 1
in9 <- in8 + ntot
in10 <- in9 + 1
in11 <- in10 + 2*npatt
in12 <- in11 + ntot
in13 <- in12 + r*npatt
in14 <- in13 + 1
in15 <- in14 + p
in16 <- in15 + q
in17 <- in16 + 1
in18 <- in17 + 1
subj <- tmp$intinput[in6:(in7-1)]
nmax <- tmp$intinput[in7]
iposn <- tmp$intinput[in8:(in9-1)]
npatt <- tmp$intinput[in9]
pstfin <- matrix(tmp$intinput[in10:(in11-1)],nrow=npatt)
patt <- tmp$intinput[in11:(in12-1)]
rmat <- matrix(tmp$intinput[in12:(in13-1)],nrow=npatt)
pcol <- tmp$intinput[in13]
xcol <- tmp$intinput[in14:(in15-1)]
zcol <- tmp$intinput[in15:(in16-1)]
maxits <- tmp$intinput[in16]
ggs <- tmp$intinput[in17]
sflag <- tmp$intinput[in18]
# Unpack the double inputs (estimates updated in place by Fortran);
# psi comes back as a q x q x r array here, unlike mlmmm.em.
idi1 <- 1
idi2 <- idi1 + ntot*pcol
idi3 <- idi2 + r*ntot
idi4 <- idi3 + r*r
idi5 <- idi4 + p*r
idi6 <- idi5 + q*q*r
idi7 <- idi6 + 1
pred <- matrix(tmp$dbinput[idi1:(idi2-1)],nrow= ntot)
y <- matrix(tmp$dbinput[idi2:(idi3-1)],nrow= ntot)
sigma <- matrix(tmp$dbinput[idi3:(idi4-1)],nrow=r)
beta <- matrix(tmp$dbinput[idi4:(idi5-1)],nrow= p)
psi <- array(tmp$dbinput[idi5:(idi6-1)],dim=c(q,q,r))
eps <- tmp$dbinput[idi6]
epsi <- matrix(tmp$dbinput[idi7:(idi7+ntot*r-1)],nrow= ntot)
# Unpack integer outputs: per-subject row ranges, sizes, iteration count,
# message code and convergence flag.
io1 <- 1
io2 <- io1 + m
io3 <- io2 + m
io4 <- io3 + 1
io5 <- io4 + m
io6 <- io5 + 1
io7 <- io6 + 1
ist <- tmp$intoutpt[io1]
ifin <- tmp$intoutpt[io2:(io3-1)]
nstar <- tmp$intoutpt[io3]
nstari <- tmp$intoutpt[io4:(io5-1)]
iter <- tmp$intoutpt[io5]
msg <- tmp$intoutpt[io6]
cvgd <- tmp$intoutpt[io7]
# Unpack double output workspaces (10 stacked (r*nmax)^2 matrices).
isub1 <- r*nmax
isub <- isub1*isub1
ido1 <- 1
ido2 <- ido1 + isub
ido3 <- ido2 + isub
ido4 <- ido3 + isub
ido5 <- ido4 + isub
ido6 <- ido5 + isub
ido7 <- ido6 + isub
ido8 <- ido7 + isub
ido9 <- ido8 + isub
ido10 <- ido9 + isub
wo <- matrix(tmp$dboutput[ido1:(ido2-1)],nrow= isub1)
wo1 <- matrix(tmp$dboutput[ido2:(ido3-1)],nrow= isub1)
wm <- matrix(tmp$dboutput[ido3:(ido4-1)],nrow= isub1)
wom <- matrix(tmp$dboutput[ido4:(ido5-1)],nrow= isub1)
wkwmm1 <- matrix(tmp$dboutput[ido5:(ido6-1)],nrow= isub1)
wkwmm2 <- matrix(tmp$dboutput[ido6:(ido7-1)],nrow= isub1)
eyyt <- matrix(tmp$dboutput[ido7:(ido8-1)],nrow= isub1)
eyxyxt <- matrix(tmp$dboutput[ido8:(ido9-1)],nrow= isub1)
wkeyxyxt <- matrix(tmp$dboutput[ido9:(ido10-1)],nrow= isub1)
wkqnm1 <- matrix(tmp$dboutput[ido10:(ido10+isub-1)],nrow= isub1)
clock<-proc.time()-now
cat("\n")
# Translate the Fortran message code into a user-facing warning.
{if(msg==1)
warning("xtx is not full rank, failed for calculating beta<-(0)")
else if(msg==2)
warning("Value of psi or sigma or U<-i
became non-pos.def.during iterations")
else if(msg==3)
warning("Value of Var(y<-i(obs)) became non-pos.def.during iterations")
else if(msg==4)
warning("GLS failed for start vals, xtwx not full rank")
else if(msg==5)
warning("Value of psi became non-pos.def. during iterations")
else if(msg==6)
warning("Value of sigma became non-pos.def. during iterations")
else if(msg==7)
warning("log-density not concave at one or more scoring steps")}
llvec<-tmp$llvec[1:iter]
llovec<-tmp$llovec[1:iter]
converged<-cvgd==as.integer(1)
if(!converged) warning(paste("did not converge by",
format(iter),"iterations"))
list(beta=beta,sigma=sigma,psi=psi,eb=tmp$eb,varb=tmp$varb,xtwxinv=tmp$xtwxinv,
converged=converged,iter=iter,npatt=npatt,pstfin=pstfin,iposn=iposn,patt=patt,rmat=rmat,
logll=llvec,logoll=llovec,clock=clock)}
# NOTE(review): these were two unterminated string literals (`cat("\n`) in the
# source -- the closing quotes and banner text were lost, apparently when
# comments were stripped, leaving the file unparseable.  Reconstructed as a
# section banner matching the intact closing banner at the end of this file;
# confirm the exact text against the upstream source.
cat("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++ Function ++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
# NA-tolerant wrappers around common summary statistics: each forwards to the
# base function with na.rm = TRUE so callers need not repeat it.
meanNA <- function(x) {
  mean(x, na.rm = TRUE)
}
medianNA <- function(x) {
  median(x, na.rm = TRUE)
}
sdNA <- function(x) {
  sd(c(x), na.rm = TRUE)
}
# "Corrected" standard deviation: rescales sd() from the (n-1) to the n
# denominator, where n is the total length of x (NAs included).
sdcNA <- function(x) {
  n <- length(x)
  sd(c(x), na.rm = TRUE) * sqrt((n - 1) / n)
}
varNA <- function(x) {
  var(c(x), na.rm = TRUE)
}
rangeNA <- function(x) {
  range(x, na.rm = TRUE)
}
# which.min() variant that returns NA (instead of a zero-length integer)
# when the input contains no non-NA values.
which.minNA <- function(x) {
  pos <- which.min(x)
  if (length(pos) == 0) NA else pos
}
# TRUE for a "true NA": NA but not NaN.  A zero-length input counts as NA;
# lists are unlisted first, so the result is vectorised over the elements.
is.tna <- function(x) {
  if (length(x) == 0) {
    return(TRUE)
  }
  if (is.list(x)) {
    x <- unlist(x)
  }
  is.na(x) & !is.nan(x)
}
# Print a vector with cat(), truncating to the first nToCat elements followed
# by "..." when it is longer than that.
catShort <- function(x, nToCat = 10) {
  if (length(x) > nToCat) {
    cat(x[seq_len(nToCat)], "...")
  } else {
    cat(x)
  }
}
# Print at most nRowToPrint x nColToPrint cells of a matrix, appending "..."
# markers for truncated columns/rows; an empty (zero-column) matrix prints a
# placeholder message instead.
printMatrixShort <- function(mat, nColToPrint = 10, nRowToPrint = 5) {
  if (ncol(mat) == 0) {
    cat(" <no trajectories>\n")
  } else {
    if (ncol(mat) > nColToPrint) {
      shown <- as.data.frame(mat[, 1:nColToPrint])
      shown$more <- "..."   # flag truncated columns
    } else {
      shown <- as.data.frame(mat)
    }
    if (nrow(mat) > nRowToPrint) {
      print(shown[1:nRowToPrint, ])
      cat("... ...\n")      # flag truncated rows
    } else {
      print(shown)
    }
  }
}
# Print x as-is when it has at most 10 elements/columns; otherwise print the
# first 10 with an unnamed "..." column appended.
printShort <- function(x) {
  if (length(x) > 10) {
    shortened <- cbind(x[1:10], "...")
    names(shortened)[11] <- ""
    print(shortened)
  } else {
    print(x)
  }
}
# Print a single trajectory as one named row.  `oneTraj` is a two-column
# structure (time, value): times become the column names, and `name` (with a
# trailing colon) labels the row.  Long rows are truncated by printShort().
printOneTraj <- function(name, oneTraj) {
  asRow <- data.frame(t(oneTraj[, 2]))
  names(asRow) <- oneTraj[, 1]
  row.names(asRow) <- paste(name, ":", sep = "")
  printShort(asRow)
}
# Pretty-print a long-format trajectory data.frame (columns: id, time, value),
# showing at most nRowToPrint individuals separated by ruler lines.
printTrajLong <- function(trajLong, nRowToPrint = 5) {
  ids <- unique(trajLong[, 1])
  if (length(ids) > nRowToPrint) {
    ids <- ids[1:nRowToPrint]
  }
  printOneTraj(ids[1], trajLong[trajLong[, 1] == ids[1], 2:3])
  for (current in ids[-1]) {
    cat("-------------------------------------------\n")
    printOneTraj(current, trajLong[trajLong[, 1] == current, 2:3])
  }
}
# Convert a long-format trajectory data.frame (id, time, value -- in that
# fixed column order) to wide format (one row per individual, one column per
# time) via stats::reshape.  Also exported under the alias longToWide.
reshapeLongToWide <- longToWide <- function(trajLong){
  if (ncol(trajLong) != 3) {
    stop("[reduceNbTimesLong] The data.frame 'trajLong' has to be (no choice) in the following format:
 - first column should be the individual indentifiant;
 - the second should be the times at which the measurement are made;
 - the third one should be the measurement.")
  }
  colNames <- names(trajLong)
  # Sort by time so the wide columns come out in chronological order.
  ordered <- trajLong[order(trajLong[, 2]), ]
  reshape(ordered, idvar = colNames[1], timevar = colNames[2],
          v.names = colNames[3], direction = "wide")
}
# Convert a wide-format trajectory data.frame (first column = id, remaining
# columns = measurements) to long format (id, times, values), dropping rows
# whose measurement is NA.  Also exported under the alias wideToLong.
reshapeWideToLong <- wideToLong <- function(trajWide, times = 1:(ncol(trajWide) - 1)) {
  ids <- trajWide[, 1]
  nTimes <- ncol(trajWide) - 1
  long <- data.frame(
    id = rep(ids, each = nTimes),
    times = rep(times, nrow(trajWide)),
    values = as.numeric(t(as.matrix(trajWide[, -1])))
  )
  long[!is.na(long[, 3]), ]
}
# End-of-file banner printed when this utility script is sourced.
cat("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++ Fin Function ++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
# S4 method: draw a dot-density map for a SpatVector.  Each geometry gets
# round(value / size) dots placed at random inside it; the sampled points are
# drawn on the current device (if any) and returned invisibly.
# `field` selects the numeric attribute (by name or column index); `size` is
# the value represented by one dot.
setMethod("dots", signature(x="SpatVector"),
function(x, field, size, ...) {
n <- length(x)
if (n < 1) return(NULL)
# field may be a column name or a positive column index.
if (is.character(field)) {
stopifnot(field %in% names(x))
} else {
stopifnot(field > 0 && field <= ncol(x))
}
stopifnot(is.numeric(x[[field,drop=TRUE]]))
field <- x[[field,drop=TRUE]]
size <- size[1]
stopifnot(size > 0)
# Number of dots per geometry; values below one dot (or NA) get none.
d <- round(field / size)
d[d < 1 | is.na(d)] <- 0
i <- d > 0;
if (sum(i) == 0) {
# error() is terra's internal error helper (fn name + message).
error("dots", "'size' is too small")
}
s <- spatSample(x[i], d[i], method="random")
# Only draw when a graphics device is open; plotting failures are ignored.
if (.Device != "null device") {
try(points(s, ...), silent=TRUE)
}
invisible(s)
}
)
# Draw the line geometries of SpatVector x, one colour per geometry taken
# from out$main_cols; lty/lwd are recycled per geometry.  Records the used
# lty/lwd in out$leg and returns the updated `out`.
# (A dead local `cols` that was computed from out$cols but never used has
# been removed -- plotting reads out$main_cols.)
.plotLines <- function(x, out, lty=1, lwd=1, ...) {
	if (nrow(x) == 0) return(out)
	# Geometry table split per geometry, then per part (multi-line support).
	g <- geom(x, df=TRUE)
	g <- split(g, g[,1])
	g <- lapply(g, function(gi) split(gi, gi[,2]))
	n <- length(g)
	lty <- rep_len(lty, n)
	lwd <- rep_len(lwd, n)
	for (i in seq_len(n)) {
		parts <- g[[i]]
		for (j in seq_along(parts)) {
			# columns 3:4 hold the x/y coordinates
			lines(parts[[j]][,3:4], col=out$main_cols[i], lwd=lwd[i], lty=lty[i], ...)
		}
	}
	out$leg$lwd <- lwd
	out$leg$lty <- lty
	out
}
# Draw the polygon geometries of SpatVector x, one colour per geometry from
# out$main_cols.  Holes are handled by inserting NA separator rows and using
# polypath with the even-odd rule; an optional hatching `density`/`angle`
# draws a polygon() fill plus a separate polypath() border.  Records border,
# density, angle, lty and lwd in out$leg and returns `out` invisibly.
.plotPolygons <- function(x, out, lty=1, lwd=1, density=NULL, angle=45, ...) {
if (nrow(x) == 0) return(out)
# Geometry table split per geometry, then per part (multi-polygon support).
g <- geom(x, df=TRUE)
g <- split(g, g[,1])
g <- lapply(g, function(y) split(y, y[,2]))
n <- length(g)
if (!is.null(out$leg$border)) {
out$leg$border <- rep_len(out$leg$border, n)
} else {
out$leg$border <- NA
}
if (!is.null(density)) {
out$leg$density <- rep_len(density, length(g))
out$leg$angle <- rep_len(angle, n)
}
out$leg$lty <- rep_len(lty, n)
out$leg$lwd <- rep_len(lwd, n)
# Warnings are suppressed below (inside the loop) and restored on exit.
w <- getOption("warn")
on.exit(options("warn" = w))
for (i in 1:length(g)) {
gg <- g[[i]]
for (j in 1:length(gg)) {
a <- gg[[j]]
if (any(is.na(a))) next
# Column 5 marks hole rings: separate rings with NA rows so that
# polypath's even-odd rule punches the holes out.
if (any(a[,5] > 0)) {
a <- split(a, a[,5])
a <- lapply(a, function(i) rbind(i, NA))
a <- do.call(rbind, a )
a <- a[-nrow(a), ]
}
if (!is.null(out$leg$density)) {
# Hatched fill plus a separately drawn border.
graphics::polygon(a[,3:4], col=out$main_cols[i], density=out$leg$density[i], angle=out$leg$angle[i], border=NA, lwd=out$leg$lwd[i], lty=out$leg$lty[i], ...)
graphics::polypath(a[,3:4], col=NA, rule="evenodd", border=out$leg$border[i], lwd=out$leg$lwd[i], lty=out$leg$lty[i], ...)
} else {
graphics::polypath(a[,3:4], col=out$main_cols[i], rule = "evenodd", border=out$leg$border[i], lwd=out$leg$lwd[i], lty=out$leg$lty[i], ...)
}
}
# NOTE(review): warnings are silenced only after the first geometry has
# been drawn (so the first warning, if any, still shows) -- confirm this
# placement is intentional rather than a misplaced statement.
options("warn" = -1)
}
invisible(out)
}
# Dispatch vector plotting on the geometry type recorded in out$leg$geomtype:
# points are drawn directly; polygons and lines are delegated to the
# dedicated helpers.  Returns the (possibly updated) `out` list.
.vplot <- function(x, out, xlab="", ylab="", cex=1, pch=20, ...) {
	gtype <- out$leg$geomtype
	if (gtype == "points") {
		points(x, col=out$main_cols, cex=cex, pch=pch, ...)
		# remember symbol settings for the legend
		out$leg$pch <- pch
		out$leg$pt.cex <- cex
	} else if (gtype == "polygons") {
		out <- .plotPolygons(x, out, density=out$leg$density, angle=out$leg$angle, ...)
	} else {
		out <- .plotLines(x, out, ...)
	}
	out
}
# Fit a colour palette to exactly n colours: an over-long palette is thinned
# by regular sampling, a short one is recycled; NULL passes through.  When
# 0 <= alpha < 1, the colours are converted to #RRGGBBAA with that opacity.
.getCols <- function(n, cols, alpha=1) {
	if (!is.null(cols)) {
		ncols <- length(cols)
		if (ncols > n) {
			# pick n colours at (rounded) regular intervals
			idx <- round(seq(1, ncols, ncols / n))
			cols <- cols[idx]
		} else if (ncols < n) {
			cols <- rep_len(cols, n)
		}
	}
	if (alpha < 1 && alpha >= 0) {
		cols <- grDevices::rgb(t(grDevices::col2rgb(cols)),
			alpha = alpha[1] * 255, maxColorValue = 255)
	}
	cols
}
# No legend requested: just assign one colour per geometry and pass through.
.vect.legend.none <- function(out) {
	out[["main_cols"]] <- .getCols(out[["ngeom"]], out[["cols"]], 1)
	out
}
# Class legend: one colour and one legend entry per unique value in out$uv.
# Geometries are coloured by the position of their value among the unique
# values; NAs are recoloured with out$colNA when supplied.  Fills legend
# fields (legend, fill, x) and returns the updated `out`.
.vect.legend.classes <- function(out) {
	if (isTRUE(out$legend_sort)) {
		out$uv <- sort(out$uv)
	}
	ucols <- .getCols(length(out$uv), out$cols, 1)
	out$cols <- ucols
	# colour per geometry: position of its value among the unique values
	out$main_cols <- ucols[match(out$v, out$uv)]
	if (!is.null(out$colNA)) {
		out$main_cols[is.na(out$main_cols)] <- out$colNA
	}
	out$levels <- out$uv
	out$leg$legend <- out$uv
	nlevs <- length(out$uv)
	pal <- out$cols
	npal <- length(pal)
	if (nlevs < npal) {
		# thin an over-long palette at regular positions
		pal <- pal[trunc((npal / nlevs) * 1:nlevs)]
	} else {
		pal <- rep_len(pal, nlevs)
	}
	out$leg$fill <- pal
	out$legend_type <- "classes"
	if (is.null(out$leg$x)) {
		out$leg$x <- "top"
	}
	out
}
# Build continuous-legend settings: bin the values into as many groups
# as there are colors and give each geometry the color of its bin.
# Non-numeric values are first converted to integer factor codes.
#
# Fix: when the value range was degenerate (diff == 0), the digits were
# written to `out$leg_digits` -- a field nothing reads -- instead of
# `out$leg$digits`, leaving the legend digits NULL. Also removed two
# locals (`interval`, `breaks`) that were computed but never used.
.vect.legend.continuous <- function(out) {
	z <- stats::na.omit(out$v)
	n <- length(z)
	if (n == 0) error("plot", "no values")
	if (!is.numeric(out$v)) {
		# map character/factor values onto integer codes so they can be binned
		out$v <- as.integer(as.factor(out$v))
		z <- stats::na.omit(out$v)
		n <- length(z)
	}
	out$legend_type <- "continuous"
	if (is.null(out$levels)) {
		out$levels <- 5
	}
	if (is.null(out$leg$digits)) {
		dif <- diff(out$range)
		if (dif == 0) {
			# degenerate range: no decimals needed
			out$leg$digits <- 0
		} else {
			out$leg$digits <- max(0, -floor(log10(dif/10)))
		}
	}
	if (is.null(out$leg$loc)) out$leg$loc <- "right"
	brks <- seq(out$range[1], out$range[2], length.out = length(out$cols))
	grps <- cut(out$v, breaks = brks, include.lowest = TRUE)
	# indexing by the factor uses its integer codes
	out$main_cols <- out$cols[grps]
	out
}
# Build interval (binned) legend settings for a vector plot.
# Values are cut into class intervals; each geometry is colored by its
# interval, and legend labels of the form "low - high" are generated
# from the cut() levels unless the caller supplied its own labels.
.vect.legend.interval <- function(out, dig.lab=3, ...) {
nmx <- length(out$uv)
if (!is.numeric(out$v)) {
# non-numeric values are binned via their factor codes
out$v <- as.integer(as.factor(out$v))
}
if (is.null(out$breaks)) {
out$breaks <- min(5, nmx)
}
if (length(out$breaks) == 1) {
# a single number means "this many breaks": compute actual break points
out$breaks <- .get_breaks(out$v, out$breaks, out$breakby, out$range)
}
fz <- cut(out$v, out$breaks, include.lowest=TRUE, right=FALSE, dig.lab=dig.lab)
out$vcut <- as.integer(fz)
levs <- levels(fz)
nlevs <- length(levs)
cols <- out$cols
ncols <- length(cols)
if (nlevs < ncols) {
# take an evenly spaced subset of the palette
i <- trunc((ncols / nlevs) * 1:nlevs)
cols <- cols[i]
} else {
cols <- rep_len(cols, nlevs)
}
out$cols <- cols
out$leg$fill <- cols
out$legend_type <- "classes"
if (!is.null(out$leg$legend)) {
if (length(out$leg$legend) != nlevs) {
warn("plot", "legend does not match number of levels")
out$leg$legend <- rep_len(out$leg$legend, nlevs)
}
} else {
# turn cut() labels like "[0,5)" into "0 - 5"
# (right=FALSE above means "(" never appears in the labels)
levs <- gsub("]", "", gsub(")", "", gsub("\\[", "", levs)))
levs <- paste(levs, collapse=",")
m <- matrix(as.numeric(unlist(strsplit(levs, ","))), ncol=2, byrow=TRUE)
m <- apply(m, 1, function(i) paste(i, collapse=" - "))
out$leg$legend <- m
}
if (is.null(out$leg$x)) {
out$leg$x <- "top"
}
out$main_cols <- out$cols[out$vcut]
if (!is.null(out$colNA)) {
out$main_cols[is.na(out$main_cols)] <- out$colNA
}
out
}
# Set up the plotting region and legend configuration for a SpatVector
# map: open the (empty) plot unless adding to an existing one, dispatch
# to the legend builder matching out$legend_type, draw the geometries
# (unless legend_only), and draw axes/legend as requested.
# Returns the updated plot-state list.
#
# Fix: the nuq < 2 interval fallback called .vect.legend.classes(out, ...),
# but that function takes exactly one argument, so any extra graphical
# arguments in `...` triggered an "unused argument" error.
.plot.vect.map <- function(x, out, xlab="", ylab="", type = "n", yaxs="i", xaxs="i", asp=out$asp, density=NULL, angle=45, border="black", dig.lab=3, main="", sort=TRUE, ...) {
	if ((!out$add) && (!out$legend_only)) {
		if (!any(is.na(out$mar))) { graphics::par(mar=out$mar) }
		# open an empty plot with the right limits/aspect; geometries come later
		plot(out$lim[1:2], out$lim[3:4], type="n", xlab=xlab, ylab=ylab, asp=asp, xaxs=xaxs, yaxs=yaxs, axes=FALSE, main=main)
		if (!is.null(out$background)) {
			usr <- graphics::par("usr")
			graphics::rect(usr[1], usr[3], usr[2], usr[4], col=out$background)
		}
	}
	out$leg$density <- density
	out$leg$angle <- angle
	out$leg$border <- border
	out$legend_sort <- isTRUE(sort)
	nuq <- length(out$uv)
	if (out$legend_type == "none") {
		out <- .vect.legend.none(out)
	} else if (out$legend_type == "classes") {
		out <- .vect.legend.classes(out)
	} else if (out$legend_type == "interval") {
		if (nuq < 2) {
			# too few unique values for intervals; fall back to classes
			out <- .vect.legend.classes(out)
		} else {
			out <- .vect.legend.interval(out, dig.lab=dig.lab)
		}
	} else if (out$legend_type == "depends") {
		# heuristic: few unique or non-numeric values -> classes, else intervals
		if (nuq < 11) {
			out <- .vect.legend.classes(out)
		} else if (!is.numeric(out$uv)) {
			out <- .vect.legend.classes(out)
		} else {
			out <- .vect.legend.interval(out, dig.lab=dig.lab)
		}
	} else {
		if (nuq == 1) {
			out <- .vect.legend.classes(out)
		} else {
			out <- .vect.legend.continuous(out)
			# hatching is not meaningful for a continuous color ramp
			out$leg$density <- NULL
		}
	}
	if (!out$legend_only) {
		out <- .vplot(x, out, ...)
	}
	if (out$axes) {
		out <- .plot.axes(out)
	}
	if (out$legend_draw) {
		if (out$legend_type == "continuous") {
			out$legpars <- do.call(.plot.cont.legend, list(x=out))
		} else {
			out$legpars <- do.call(.plot.class.legend, out$leg)
		}
	}
	out
}
# Assemble the plot-state list for a SpatVector plot: map extent (cropped
# to `ext` or padded via `buffer`/xlim/ylim), values of the selected
# variable `y`, value range, colors, legend/axis settings and margins;
# then hand off to .plot.vect.map() for the actual drawing.
#
# Note: the `ext` argument shadows terra's ext() for non-function uses
# only; calls like ext(x) still resolve to the function.
#
# Fix: for type == "none" the code assigned to `legend_only`, a variable
# that is never read; the parameter is spelled `legend.only`, so
# legend-only mode was never actually switched off.
.prep.vect.data <- function(x, y, type, cols=NULL, mar=NULL, legend=TRUE,
	legend.only=FALSE, levels=NULL, add=FALSE, range=NULL, breaks=NULL, breakby="eqint",
	xlim=NULL, ylim=NULL, colNA=NA, alpha=NULL, axes=TRUE, main=NULL, buffer=TRUE, background=NULL,
	pax=list(), plg=list(), ext=NULL, grid=FALSE, las=0, ...) {
	out <- list()
	out$ngeom <- nrow(x)
	e <- as.vector(ext(x))
	out$ext <- e
	if (!is.null(ext)) {
		stopifnot(inherits(ext, "SpatExtent"))
		x <- crop(x, ext)
		out$ext <- as.vector(ext(x))
		out$lim <- ext
	} else {
		# no explicit extent: honor xlim/ylim; otherwise pad each side by 2%
		if (!is.null(xlim)) {
			stopifnot(length(xlim) == 2)
			e[1:2] <- sort(xlim)
		} else if (buffer) {
			dx <- diff(e[1:2]) / 50
			e[1:2] <- e[1:2] + c(-dx, dx)
		}
		if (!is.null(ylim)) {
			stopifnot(length(ylim) == 2)
			e[3:4] <- sort(ylim)
		} else if (buffer) {
			dy <- diff(e[3:4]) / 50
			e[3:4] <- e[3:4] + c(-dy, dy)
		}
		out$lim <- e
	}
	out$add <- isTRUE(add)
	out$axes <- isTRUE(axes)
	out$axs <- as.list(pax)
	if (is.null(out$axs$las)) out$axs$las <- las
	out$draw_grid <- isTRUE(grid)
	out$leg <- as.list(plg)
	out$leg$geomtype <- geomtype(x)
	out$asp <- 1
	out$lonlat <- is.lonlat(x, perhaps=TRUE, warn=FALSE)
	if (out$lonlat) {
		# approximate lon/lat aspect correction at the extent's mid-latitude
		out$asp <- 1/cos((mean(out$ext[3:4]) * pi)/180)
	}
	out$breaks <- breaks
	out$breakby <- breakby
	out$background <- background
	v <- unlist(x[, y, drop=TRUE], use.names=FALSE)
	if (!is.null(range)) {
		# values outside the user-supplied range are dropped (set to NA)
		range <- sort(range)
		v[v < range[1]] <- NA
		v[v > range[2]] <- NA
		if (all(is.na(v))) {
			# nothing left to map onto colors: plain, legend-less plot
			v <- NULL
			y <- ""
			type <- "none"
		} else {
			out$range <- range
		}
		out$range_set <- TRUE
	} else {
		if (!is.null(v)) {
			out$range <- range(v, na.rm=TRUE)
		}
		out$range_set <- FALSE
	}
	out$v <- v
	out$uv <- unique(out$v)
	if (missing(type)) {
		type <- "depends"
	} else {
		type <- match.arg(type, c("continuous", "classes", "interval", "depends", "none"))
	}
	out$levels <- levels
	if (type == "none") {
		legend <- FALSE
		legend.only <- FALSE
	}
	out$legend_type <- type
	if (is.null(cols)) {
		if (type == "none") {
			# lines and points need some default color even without a legend
			if (out$leg$geomtype %in% c("lines", "points")) {
				cols <- "black"
			}
		} else {
			cols <- rev(grDevices::rainbow(100, start=.1, end=0.9))
		}
	}
	if (!is.null(alpha)) {
		alpha <- clamp(alpha[1]*255, 0, 255)
		cols <- grDevices::rgb(t(grDevices::col2rgb(cols)), alpha=alpha, maxColorValue=255)
	} else {
		alpha <- 255
	}
	out$cols <- cols
	out$legend_draw <- isTRUE(legend)
	out$legend_only <- isTRUE(legend.only)
	if (is.null(mar)) {
		# leave room on the right when a legend will be drawn
		if (out$legend_draw) {
			mar <- c(3.1, 3.1, 2.1, 7.1)
		} else {
			mar <- c(3.1, 3.1, 2.1, 2.1)
		}
	}
	out$mar <- rep_len(mar, 4)
	out$skipNA <- TRUE
	if (!is.null(colNA)) {
		if (!is.na(colNA)) {
			out$colNA <- grDevices::rgb(t(grDevices::col2rgb(colNA)), alpha=alpha, maxColorValue=255)
			# NOTE(review): out$r is never set for vector data (raster
			# leftover?); this statement is effectively a no-op -- confirm
			out$r[is.na(out$r)] <- out$colNA
			out$skipNA <- FALSE
		} else {
			out$colNA <- NULL
		}
	}
	.plot.vect.map(x, out, main=main, ...)
}
# Plot one panel per requested variable name; "" means geometry only.
# A character `legend` is interpreted as the legend position.
setMethod("plot", signature(x="SpatVector", y="character"),
	function(x, y, col=NULL, type, mar=NULL, legend=TRUE, add=FALSE, axes=!add,
		main=y, buffer=TRUE, background=NULL, grid=FALSE, ext=NULL,
		plg=list(), pax=list(), nr, nc, ...) {
		if (nrow(x) == 0) {
			error("plot", "SpatVector has zero geometries")
		}
		y <- trimws(y)
		if (any(is.na(match(y, c("", names(x)))))) {
			i <- is.na(match(y, names(x)))
			# fix: the message tail was passed as a stray third argument to
			# error() instead of being pasted into the message itself
			error("plot", paste(paste(y[i], collapse=","), "is not a name in x"))
		}
		nrnc <- c(1,1)
		if (length(y) > 1) {
			# multi-panel layout; restore the old par() settings on exit
			nrnc <- .get_nrnc(nr, nc, length(y))
			old.par <- graphics::par(no.readonly =TRUE)
			on.exit(graphics::par(old.par))
			graphics::par(mfrow=nrnc)
		}
		if (is.character(legend)) {
			# a character legend is its position
			plg$x <- legend
			legend <- TRUE
		}
		for (i in seq_along(y)) {
			if (length(y) > 1) {
				# only draw axes on the outer edges of the panel grid
				newrow <- (nrnc[2] == 1) | ((i %% nrnc[2]) == 1)
				lastrow <- i > (prod(nrnc) - nrnc[2])
				if (lastrow) {
					if (newrow) {
						pax$sides <- 1:2
					} else {
						pax$sides <- 1
					}
				} else if (newrow) {
					pax$sides <- 2
				} else {
					pax$sides <- 0
				}
			}
			if (missing(col)) col <- NULL
			if (y[i] == "") {
				out <- .prep.vect.data(x, y="", type="none", cols=col, mar=mar, plg=list(), pax=pax, legend=FALSE, add=add, axes=axes, main=main[i], buffer=buffer, background=background, grid=grid, ext=ext, ...)
			} else {
				out <- .prep.vect.data(x, y[i], type=type, cols=col, mar=mar, plg=plg, pax=pax, legend=isTRUE(legend), add=add, axes=axes, main=main[i], buffer=buffer, background=background, grid=grid, ext=ext, ...)
			}
		}
		invisible(out)
	}
)
# Plot by column index: translate numeric positions into column names
# (index 0 selects the geometry-only plot) and delegate to the
# character method.
setMethod("plot", signature(x="SpatVector", y="numeric"),
	function(x, y, ...) {
		y <- round(y)
		if (any(y > ncol(x))) {
			error("plot", paste("x only has", ncol(x), " columns"))
		}
		y[y < 0] <- 0
		nms <- c("", names(x))
		out <- plot(x, nms[y + 1], ...)
		invisible(out)
	}
)
# With no variable given, draw the geometries only (no attribute mapping).
setMethod("plot", signature(x="SpatVector", y="missing"),
	function(x, y, ...) {
		invisible(plot(x, "", ...))
	}
)
# A proxy vector is not read into memory; just draw its extent.
setMethod("plot", signature(x="SpatVectorProxy", y="missing"),
	function(x, y, ...) {
		e <- ext(x)
		plot(e, ...)
	}
)
# Validate a bounding outline and return at most its first 26 points as
# a data frame. Warns (without failing) when the outline has fewer than
# 2 points, or more than the 26 that will actually be used.
checkBounds <- function(bounds, fname) {
	npts <- length(bounds$x)
	if (npts < 2) {
		warning(paste0("Bounding box not recorded for ", fname))
	}
	if (npts > 26) {
		warning(paste0("Only first 26 points used for ", fname))
	}
	utils::head(as.data.frame(bounds), 26)
}
# Rake survey weights to match population target distributions
# (ANES-style weighting).
#
# inputter     named list of target distributions; names must match
#              columns of `dataframe`; each element should sum to 1 or
#              100 (or is rescaled when force1 = TRUE).
# dataframe    data frame holding the weighting variables.
# caseid       case identifiers, same length as the filtered data.
# weightvec    optional base weights (defaults to 1 for every case).
# cap          maximum allowed weight value, passed to rakelist().
# type         rule for selecting the raking variables: "nolim",
#              "pctlim", "nlim", "nmin", or "nmax".
# pctlim/nlim  thresholds used by the selection rules.
# filter       0/1 vector; only cases with filter == 1 are used.
# choosemethod how discrepancies are summarized (e.g. "total").
# iterate      after raking, re-check discrepancies and re-run with any
#              newly discrepant variables added.
# convcrit     convergence criterion for the raking algorithm.
# force1       rescale each target to sum to exactly 1.
# center.baseweights  divide base weights by their mean before raking.
#
# Returns an object of class "anesrake": a list with the final weights,
# the variables used, convergence information, and the inputs.
anesrake <-
function(inputter, dataframe, caseid,
weightvec = NULL, cap = 5, verbose = FALSE, maxit = 1000,
type = "pctlim", pctlim = 5, nlim = 5, filter = 1, choosemethod = "total",
iterate = TRUE, convcrit = 0.01, force1 = TRUE, center.baseweights=TRUE) {
# keep only the filtered cases
dataframe <- dataframe[filter == 1, ]
caseid <- caseid[filter == 1]
weightvec <- weightvec[filter == 1]
mat <- as.data.frame(dataframe)
origtype = type
fullvars <- 0
# default base weights: 1 for every case
if (is.null(weightvec)) {
weightvec <- rep(1, length(caseid))
}
# center base weights so they average to 1
if(center.baseweights==TRUE){
weightvec <- weightvec/mean(weightvec, na.rm=TRUE)
}
if (length(weightvec) != length(caseid)) {
stop("weight vector does not contain the same number of cases as data frame")
}
prevec <- weightvec
# identify targets that do not sum to 1 (or 100)
not100 <- NULL
not100 <- names(inputter)[!(sapply(inputter, function(x) sum(x) %in% c(1, 100)))]
if(!is.null(not100) & force1==FALSE & length(not100)>0){
warning(paste("Targets for", not100, "do not sum to 100%. Did you make a typo entering the targets?"))
warning(paste("You can force variables to sum to 1 by setting force1 to 'TRUE'"))
}
# every target variable must exist in the data frame
if(sum(names(inputter) %in% names(dataframe))!=length(names(inputter)))
stop(paste("The names of the target variables should match the names of the data frame they are being matched to. The variable(s) -", names(inputter)[!(names(inputter) %in% names(dataframe))], "- were not found in the data frame"))
# optionally rescale all targets so each sums to exactly 1
if(force1==TRUE){
if(!is.null(not100) & length(not100)>0)
warning(paste("Targets for", not100, "do not sum to 100%. Adjusting values to total 100%"))
inputter <- lapply(inputter, function(x) x/sum(x))
}
# NOTE(review): computed but never used -- presumably intended to guard
# against negative target values; confirm before removing
illegalnegs <- sum(unlist(inputter)<0)
# discrepancy between sample and targets under the base weights
discrep1 <- anesrakefinder(inputter, dataframe, weightvec,
choosemethod)
# choose the set of raking variables ("towers") according to `type`
if (type == "nolim") {
towers <- inputter
}
if (type == "pctlim") {
towers <- selecthighestpcts(discrep1, inputter, pctlim)
}
if (type == "nlim") {
towers <- selectnhighest(discrep1, inputter, nlim)
}
if (type == "nmin") {
# at least the pctlim set, but never fewer than nlim variables
towers <- selecthighestpcts(discrep1, inputter, pctlim,
tostop = 0)
towers2x <- selectnhighest(discrep1, inputter, nlim)
if (length(towers) > length(towers2x)) {
type <- "pctlim"
}
if (length(towers) < length(towers2x)) {
towers <- towers2x
}
}
if (type == "nmax") {
# the pctlim set, capped at nlim variables
fullvars <- 0
discrep1 <- anesrakefinder(inputter, dataframe, weightvec,
choosemethod)
towers <- selecthighestpcts(discrep1, inputter, pctlim)
towers2x <- selectnhighest(discrep1, inputter, nlim)
if (length(towers) > length(towers2x)) {
towers <- towers2x
fullvars <- 1
}
}
# initial raking run
ranweight <- rakelist(towers, mat, caseid, weightvec, cap,
verbose, maxit, convcrit)
iterations <- ranweight$iterations
iter1 <- ranweight$iterations
weightout <- ranweight$weightvec
# pctlim + iterate: re-check discrepancies under the new weights and
# re-rake with any additional variables that now exceed pctlim
if (type == "pctlim" & iterate == TRUE) {
ww <- 0
it <- 0
while (ww < 1) {
it <- it + 1
addtotowers <- selecthighestpcts(anesrakefinder(inputter,
dataframe, weightout), inputter, pctlim, tostop = 0,
warn = 0)
adders <- addtotowers[!(names(addtotowers) %in% names(towers))]
tow2 <- c(towers, adders)
towersx <- towers
towers <- tow2
# stop once no new variables were added
if (sum(as.numeric(names(towersx) %in% names(towers))) ==
length(towers)) {
ww <- 1
}
if (sum(as.numeric(names(towersx) %in% names(towers))) !=
length(towers)) {
if(verbose==TRUE)
print(paste("Additional variable(s) off after raking, rerunning with variable(s) included"))
ranweight <- rakelist(towers, mat, caseid, weightvec,
cap, verbose, maxit, convcrit)
weightout <- ranweight$weightvec
}
# safety valve: never loop more than 10 times
if (it > 10) {
ww <- 1
}
iterations <- ranweight$iterations
}
}
# nmax + iterate (nlim cap not yet hit): same re-raking loop, but fall
# back to the nlim most discrepant variables if the cap is exceeded
if (type == "nmax" & fullvars == 0 & iterate == TRUE) {
ww <- 0
it <- 0
rundiscrep <- discrep1
discrep2 <- rep(0, length(discrep1))
while (ww < 1) {
it <- it + 1
rundiscrep <- rundiscrep + discrep2
discrep2 <- anesrakefinder(inputter, dataframe, weightout)
addtotowers <- selecthighestpcts(discrep2, inputter,
pctlim, tostop = 0)
tow2 <- c(towers, addtotowers)
towersx <- towers
towers <- unique(tow2)
names(towers) <- unique(names(tow2))
if (sum(as.numeric(names(towersx) %in% names(towers))) ==
length(towers)) {
ww <- 1
}
if (sum(as.numeric(names(towersx) %in% names(towers))) !=
length(towers)) {
if(verbose==TRUE)
print(paste("Additional variable(s) off after raking, rerunning with variable(s) included"))
ranweight <- rakelist(towers, mat, caseid, weightvec,
cap, verbose, maxit, convcrit)
weightout <- ranweight$weightvec
}
if (sum(as.numeric(names(towersx) %in% names(towers))) >
nlim) {
print("variable maximum reached, running on most discrepant overall variables")
towers <- selectnhighest(discrep1, inputter,
nlim)
ranweight <- rakelist(towers, mat, caseid, weightvec,
cap, verbose, maxit, convcrit)
weightout <- ranweight$weightvec
iterations <- 0
ww <- 1
}
if (it >= 10) {
ww <- 1
}
iterations <- ranweight$iterations
}
}
# assemble the result object
names(weightout) <- caseid
out <- list(weightvec = weightout, type = type, caseid = caseid,
varsused = names(towers), choosemethod = choosemethod,
converge = ranweight$converge, nonconvergence = ranweight$nonconvergence,
targets = inputter, dataframe = dataframe, iterations = iterations,
iterate = iterate, prevec=prevec)
class(out) <- c("anesrake", "anesrakelist")
out
}
# A dfm built from a concatenated corpus (including an empty document)
# should have the expected per-document term counts.
test_that("test c.corpus", {
suppressWarnings({
expect_equal(
matrix(dfm(corpus(c("What does the fox say?", "What does the fox say?", "")),
remove_punct = TRUE)),
matrix(rep(c(1, 1, 0), 5), nrow = 15, ncol = 1)
)
})
})
# rbind of dfms with partially overlapping features aligns on the union
# of feature names and returns a dfm (S4).
test_that("test rbind.dfm with different columns", {
dfmt1 <- dfm(tokens(c(text1 = "What does the fox?"), remove_punct = TRUE))
dfmt2 <- dfm(tokens(c(text2 = "fox say"), remove_punct = TRUE))
dfmt3 <- rbind(dfmt1, dfmt2)
dfmt4 <- as.dfm(matrix(c(1, 0, 1, 1, 0, 1, 1, 0, 1, 0), nrow = 2,
dimnames = list(c("text1", "text2"),
c("does", "fox", "say", "the", "what"))))
expect_true(
setequal(featnames(dfmt3), featnames(dfmt4))
)
# expect_s4_class replaces the deprecated expect_that(..., is_a("dfm"))
expect_s4_class(
rbind(dfmt1, dfmt2),
"dfm"
)
})
# Three-way rbind with repeated words: feature union is correct and the
# result is a dfm (S4).
test_that("test rbind.dfm with different columns, three args and repeated words", {
dfmt1 <- dfm(tokens("What does the?", remove_punct = TRUE))
dfmt2 <- dfm(tokens("fox say fox", remove_punct = TRUE))
dfmt3 <- dfm(tokens("The quick brown fox", remove_punct = TRUE))
dfmt4 <- rbind(dfmt1, dfmt2, dfmt3)
dfmt5 <- as.dfm(matrix(
c(0, 0, 1, 1, 0, 0, 0, 2, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0),
nrow = 3,
dimnames = list(
c("text1", "text1", "text1"),
c("brown", "does", "fox", "quick", "say", "the", "what")
)
))
expect_true(
setequal(featnames(dfmt4), featnames(dfmt5))
)
# expect_s4_class replaces the deprecated expect_that(..., is_a("dfm"))
expect_s4_class(
rbind(dfmt1, dfmt2, dfmt3),
"dfm"
)
})
# rbind with a single dfm is the identity and still returns a dfm (S4).
test_that("test rbind.dfm with a single argument returns the same dfm", {
fox <- "What does the fox say?"
expect_true(
all(
rbind(dfm(tokens(fox))) == dfm(tokens(fox))
)
)
# expect_s4_class replaces the deprecated expect_that(..., is_a("dfm"))
expect_s4_class(
rbind(dfm(tokens(fox, remove_punct = TRUE))),
"dfm"
)
})
# rbind on dfms whose features appear in different orders must align
# counts by feature name, not by column position.
test_that("test rbind.dfm with the same features, but in a different order", {
fox <- "What does the fox say?"
xof <- "say fox the does What??"
foxdfm <- rep(1, 20)
dim(foxdfm) <- c(4, 5)
colnames(foxdfm) <- c("does", "fox", "say", "the", "what")
rownames(foxdfm) <- rep(c("text1", "text2"), 2)
dfm1 <- dfm(tokens(c(fox, xof), remove_punct = TRUE))
expect_true(
all(rbind(dfm1, dfm1) == foxdfm)
)
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference), leaving an unterminated string literal;
# restored a closed header so the file parses.
test_that("dfm keeps all types with > 10,000 documents (fixed names)", {
generate_testdfm <- function(n) {
dfm(tokens(paste("X", seq_len(n), sep = "")))
}
expect_equal(nfeat(generate_testdfm(10000)), 10000)
expect_equal(nfeat(generate_testdfm(20000)), 20000)
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference); restored a closed header so the file parses.
test_that("dfm keeps all types with > 10,000 documents (random types)", {
set.seed(10)
generate_testdfm <- function(n) {
dfm(tokens(paste(sample(letters, n, replace = TRUE), 1:n)))
}
expect_equal(nfeat(generate_testdfm(10000)), 10026)
expect_equal(nfeat(generate_testdfm(10001)), 10027)
})
# dfm() applied to an existing dfm should behave like the equivalent
# dfm_* transformations (tolower, groups, dictionary, stemming).
test_that("dfm.dfm works as expected", {
corp <- data_corpus_inaugural
toks <- tokens(corp)
dfmt <- dfm(toks, tolower = FALSE)
expect_identical(dfm(toks, tolower = FALSE), dfm(dfmt, tolower = FALSE))
expect_identical(dfm(toks, tolower = TRUE), dfm(dfmt, tolower = TRUE))
expect_identical(dfmt, dfm(dfmt, tolower = FALSE))
expect_identical(dfm_tolower(dfmt), dfm(dfmt, tolower = TRUE))
dfmt_group <- suppressWarnings(dfm(dfmt,
groups = ifelse(docvars(data_corpus_inaugural, "Party") %in%
c("Democratic", "Republican"), "Mainstream", "Minor"),
tolower = FALSE))
# NOTE(review): this compares colSums(dfmt_group) with itself, which is
# tautological; presumably colSums(dfmt) was intended -- confirm.
expect_identical(colSums(dfmt_group), colSums(dfmt_group))
expect_identical(docnames(dfmt_group), c("Mainstream", "Minor"))
dict <- dictionary(list(articles = c("The", "a", "an"),
preps = c("of", "for", "In")), tolower = FALSE)
expect_equivalent(
suppressWarnings(dfm(corp, dictionary = dict)),
suppressWarnings(dfm(dfmt, dictionary = dict))
)
expect_equivalent(
suppressWarnings(dfm(dfmt, dictionary = dict)),
dfm(tokens_lookup(toks, dict))
)
expect_identical(
suppressWarnings(dfm(tokens(corp), stem = TRUE)),
suppressWarnings(dfm(dfmt, stem = TRUE))
)
expect_identical(
suppressWarnings(dfm(tokens(corp), stem = TRUE)),
suppressWarnings(dfm(dfmt, stem = TRUE))
)
})
# cbind of two dfms concatenates their feature columns.
test_that("cbind.dfm works as expected", {
dfm1 <- dfm(tokens("This is one sample text sample"))
dfm2 <- dfm(tokens("More words here"))
dfm12 <- cbind(dfm1, dfm2)
expect_equal(nfeat(dfm12), 8)
expect_equal(names(dimnames(dfm12)),
c("docs", "features"))
})
# cbind of a dfm with plain vectors, matrices, and scalars: unnamed
# columns become "feat1", matrix column names are kept, and overlapping
# feature names produce a warning.
test_that("cbind.dfm works with non-dfm objects", {
dfm1 <- dfm(tokens(c("a b c", "c d e")))
vec <- c(10, 20)
expect_equal(
as.matrix(cbind(dfm1, vec)),
matrix(c(1, 1, 1, 0, 0, 10, 0, 0, 1, 1, 1, 20), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "feat1")))
)
expect_equal(
as.matrix(cbind(vec, dfm1)),
matrix(c(10, 1, 1, 1, 0, 0, 20, 0, 0, 1, 1, 1), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c("feat1", letters[1:5])))
)
mat <- matrix(1:4, nrow = 2, dimnames = list(NULL, c("f1", "f2")))
expect_equal(
as.matrix(cbind(dfm1, mat)),
matrix(c(1,1,1,0,0,1,3, 0,0,1,1,1,2,4), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "f1", "f2")))
)
expect_equal(
as.matrix(cbind(mat, dfm1)),
matrix(c(1,3,1,1,1,0,0, 2,4,0,0,1,1,1), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c("f1", "f2", letters[1:5])))
)
expect_equal(
as.matrix(cbind(dfm1, vec, mat)),
matrix(c(1,1,1,0,0,10,1,3, 0,0,1,1,1,20,2,4), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"),
features = c(letters[1:5], "feat1", "f1", "f2")))
)
expect_equal(
suppressWarnings(as.matrix(cbind(vec, dfm1, vec))),
matrix(c(10,1,1,1,0,0,10, 20,0,0,1,1,1,20), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"),
features = c("feat1", letters[1:5], "feat1")))
)
expect_warning(
cbind(vec, dfm1, vec),
"cbinding dfms with overlapping features"
)
expect_warning(
cbind(dfm1, dfm1),
"cbinding dfms with overlapping features"
)
expect_equal(
as.matrix(cbind(dfm1, 100)),
matrix(c(1, 1, 1, 0, 0, 100, 0, 0, 1, 1, 1, 100), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "feat1")))
)
})
# cbind of a dfm with token-count columns, named and unnamed.
test_that("more cbind tests for dfms", {
txts <- c("a b c d", "b c d e")
mydfm <- dfm(tokens(txts))
expect_equal(
as.matrix(cbind(mydfm, as.dfm(cbind("ALL" = ntoken(mydfm))))),
matrix(c(1,1,1,1,0,4, 0,1,1,1,1,4), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "ALL")))
)
expect_equal(
as.matrix(cbind(mydfm, cbind("ALL" = ntoken(mydfm)))),
matrix(c(1,1,1,1,0,4, 0,1,1,1,1,4), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "ALL")))
)
expect_equal(
as.matrix(cbind(mydfm, "ALL" = ntoken(mydfm))),
matrix(c(1,1,1,1,0,4, 0,1,1,1,1,4), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "ALL")))
)
expect_equal(
as.matrix(cbind(mydfm, ntoken(mydfm))),
matrix(c(1,1,1,1,0,4, 0,1,1,1,1,4), byrow = TRUE, nrow = 2,
dimnames = list(docs = c("text1", "text2"), features = c(letters[1:5], "feat1")))
)
})
# metadata set on the first dfm must survive cbind.
test_that("cbind.dfm keeps attributes of the dfm", {
mx1 <- as.dfm(matrix(c(0, 0, 0, 0, 1, 2), nrow = 2,
dimnames = list(c("doc1", "doc2"), c("aa", "bb", "cc"))))
mx2 <- as.dfm(matrix(c(2, 3, 0, 0, 0, 0), nrow = 2,
dimnames = list(c("doc1", "doc2"), c("dd", "ee", "ff"))))
meta(mx1, "settings") <- list(somesetting = "somevalue")
mx3 <- cbind(mx1, mx2)
expect_equal(meta(mx3), list(settings = list(somesetting = "somevalue")))
})
# basic rbind of two dfms: union of features, one row per document.
test_that("rbind.dfm works as expected", {
dfm1 <- dfm(tokens("This is one sample text sample"))
dfm2 <- dfm(tokens("More words here"))
dfm12 <- rbind(dfm1, dfm2)
expect_equal(nfeat(dfm12), 8)
expect_equal(ndoc(dfm12), 2)
expect_equal(names(dimnames(dfm12)),
c("docs", "features"))
})
# dictionary and thesaurus application with multi-word dictionary values:
# dictionary keeps only keys; thesaurus keeps unmatched tokens too.
test_that("dfm(x, dictionary = mwvdict) works with multi-word values", {
mwvdict <- dictionary(list(sequence1 = "a b", sequence2 = "x y", notseq = c("d", "e")))
txt <- c(d1 = "a b c d e f g x y z",
d2 = "a c d x z",
d3 = "x y",
d4 = "f g")
toks <- tokens(txt)
dfm1 <- suppressWarnings(dfm(toks, dictionary = mwvdict, verbose = TRUE))
expect_identical(
as.matrix(dfm1),
matrix(c(1, 0, 0, 0, 1, 0, 1, 0, 2, 1, 0, 0),
nrow = 4,
dimnames = list(docs = paste0("d", 1:4),
features = c("sequence1", "sequence2", "notseq")))
)
dfm2 <- suppressWarnings(dfm(toks, thesaurus = mwvdict, verbose = TRUE))
expect_identical(
as.matrix(dfm2),
matrix(c(1, 0, 0, 0, 1, 1, 0, 0, 2, 1, 0, 0, 1, 0, 0, 1,
1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0),
nrow = 4,
dimnames = list(docs = paste0("d", 1:4),
features = c("SEQUENCE1", "c", "NOTSEQ", "f", "g",
"SEQUENCE2", "z", "a", "x")))
)
})
# Relational operators on a dfm should return sparse logical matrices
# (Matrix::lgCMatrix). expect_s4_class replaces the deprecated expect_is().
test_that("dfm works with relational operators", {
testdfm <- dfm(tokens(c("This is an example.", "This is a second example.")))
expect_s4_class(testdfm == 0, "lgCMatrix")
expect_s4_class(testdfm >= 0, "lgCMatrix")
expect_s4_class(testdfm <= 0, "lgCMatrix")
expect_s4_class(testdfm < 0, "lgCMatrix")
expect_s4_class(testdfm < 1, "lgCMatrix")
expect_s4_class(testdfm > 0, "lgCMatrix")
expect_s4_class(testdfm > 1, "lgCMatrix")
expect_s4_class(testdfm > -1, "lgCMatrix")
expect_s4_class(testdfm < -1, "lgCMatrix")
})
# Fix: the test description string was truncated in the source (missing
# closing quote), leaving an unterminated string literal; restored.
# Arithmetic on a dfm (dfm + scalar, scalar + dfm) must preserve all
# metadata slots and docvars.
test_that("dfm addition (+) keeps attributes", {
dfmt <- head(data_dfm_lbgexample, 4)
meta(dfmt, "testsetting") <- list(test = 1)
expect_equal(
meta(dfmt + 1)["testsetting"],
list(testsetting = list(test = 1))
)
expect_equal(
meta(1 + dfmt)["testsetting"],
list(testsetting = list(test = 1))
)
dfmt@meta$object$weight_tf <- list(scheme = "prop", base = exp(1), K = 2)
expect_equal(
(dfmt + 1)@meta$object$weight_tf,
list(scheme = "prop", base = exp(1), K = 2)
)
expect_equal(
(1 + dfmt)@meta$object$weight_tf,
list(scheme = "prop", base = exp(1), K = 2)
)
weight <- list(scheme = "idf", base = NULL, c = NULL,
smoothing = NULL, threshold = NULL)
dfmt@meta$object$weight_df <- weight
expect_equal(
(dfmt + 1)@meta$object$weight_df,
weight
)
expect_equal(
(1 + dfmt)@meta$object$weight_df,
weight
)
dfmt@meta$object$smooth <- 5.5
expect_equal(
(dfmt + 1)@meta$object$smooth,
5.5
)
expect_equal(
(1 + dfmt)@meta$object$smooth,
5.5
)
dfmt@meta$object$ngram <- 5L
expect_equal(
(dfmt + 1)@meta$object$ngram,
5L
)
expect_equal(
(1 + dfmt)@meta$object$ngram,
5L
)
dfmt@meta$object$skip <- 3L
expect_equal(
(dfmt + 1)@meta$object$skip,
3L
)
expect_equal(
(1 + dfmt)@meta$object$skip,
3L
)
dfmt@meta$object$concatenator <- "+-+"
expect_equal(
(dfmt + 1)@meta$object$concatenator,
"+-+"
)
expect_equal(
(1 + dfmt)@meta$object$concatenator,
"+-+"
)
dfmt@meta$system$`package-version` <- as.package_version("10.5.1")
expect_equal(
(dfmt + 1)@meta$system$`package-version`,
as.package_version("10.5.1")
)
expect_equal(
(1 + dfmt)@meta$system$`package-version`,
as.package_version("10.5.1")
)
dfmt@docvars <- data.frame(test = letters[1:ndoc(dfmt)])
expect_equal(
(dfmt + 1)@docvars,
data.frame(test = letters[1:ndoc(dfmt)])
)
expect_equal(
(1 + dfmt)@docvars,
data.frame(test = letters[1:ndoc(dfmt)])
)
})
# verbose messages report the correct number of removed/kept features.
test_that("dfm's document counts in verbose message is correct", {
txt <- c(d1 = "a b c d e f g x y z",
d2 = "a c d x z",
d3 = "x y",
d4 = "f g")
expect_message(suppressWarnings(dfm(tokens(txt), remove = c("a", "f"), verbose = TRUE)),
"removed 2 features")
expect_message(suppressWarnings(dfm(tokens(txt), select = c("a", "f"), verbose = TRUE)),
"kept 2 features")
})
# print.dfm honors max_ndoc/max_nfeat/show_summary, including the -1
# ("all") and 0 ("none") sentinels, and rejects values below -1.
test_that("dfm print works with options as expected", {
dfmt <- dfm(tokens(data_corpus_inaugural[1:14],
remove_punct = FALSE, remove_numbers = FALSE, split_hyphens = TRUE))
expect_output(
print(dfmt, max_ndoc = 6, max_nfeat = 10, show_summary = TRUE),
paste0("^Document-feature matrix of: 14 documents, 4,452 features \\(81\\.97% sparse\\) and 4 docvars",
".*",
"\\[ reached max_ndoc \\.\\.\\. 8 more documents, reached max_nfeat \\.\\.\\. 4,442 more features \\]$")
)
expect_output(
print(dfmt[1:5, 1:5], max_ndoc = 6, max_nfeat = 10, show_summary = TRUE),
paste0("^Document-feature matrix of: 5 documents, 5 features \\(4\\.00% sparse\\) and 4 docvars\\.",
".*",
"1789-Washington\\s+3\\s+2\\s+5\\s+71\\s+116")
)
expect_output(
print(dfmt[1:5, 1:5], max_ndoc = -1, max_nfeat = -1, show_summary = TRUE),
paste0("^Document-feature matrix of: 5 documents, 5 features \\(4\\.00% sparse\\) and 4 docvars\\.",
".*",
"1805-Jefferson\\s+8\\s+1\\s+10\\s+101\\s+143")
)
expect_output(
print(dfmt[1:5, 1:5], max_ndoc = 0, max_nfeat = -1, show_summary = TRUE),
"^Document-feature matrix of: 5 documents, 5 features \\(4\\.00% sparse\\) and 4 docvars\\.$"
)
expect_output(
print(dfmt[1:5, 1:5], max_ndoc = -1, max_nfeat = 0, show_summary = TRUE),
paste0("^Document-feature matrix of: 5 documents, 5 features \\(4\\.00% sparse\\) and 4 docvars\\.",
"\\n",
"\\[ reached max_nfeat \\.\\.\\. 5 more features ]$")
)
expect_output(
print(dfmt, max_ndoc = 6, max_nfeat = 10, show_summary = FALSE),
paste0("^\\s+features",
".*",
"\\[ reached max_ndoc \\.\\.\\. 8 more documents, reached max_nfeat \\.\\.\\. 4,442 more features \\]$")
)
expect_error(print(dfmt, max_ndoc = -2),
"The value of max_ndoc must be between -1 and Inf")
expect_error(print(dfmt, max_nfeat = -2),
"The value of max_nfeat must be between -1 and Inf")
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference); restored a closed header so the file parses.
# Supplying both select and remove must error for every input type.
test_that("cannot supply remove and select in one call", {
txt <- c(d1 = "one two three", d2 = "two three four", d3 = "one three four")
corp <- corpus(txt, docvars = data.frame(grp = c(1, 1, 2)))
toks <- tokens(corp)
expect_error(
suppressWarnings(dfm(txt, select = "one", remove = "two")),
"only one of select and remove may be supplied at once"
)
expect_error(
suppressWarnings(dfm(corp, select = "one", remove = "two")),
"only one of select and remove may be supplied at once"
)
expect_error(
dfm(toks, select = "one", remove = "two"),
"only one of select and remove may be supplied at once"
)
expect_error(
dfm(dfm(toks), select = "one", remove = "two"),
"only one of select and remove may be supplied at once"
)
})
# feature removal reports the right count for character, tokens and
# dfm inputs alike.
test_that("dfm with selection options produces correct output", {
txt <- c(d1 = "a b", d2 = "a b c d e")
toks <- tokens(txt)
dfmt <- dfm(toks)
feat <- c("b", "c", "d", "e", "f", "g")
expect_message(
suppressWarnings(dfm(txt, remove = feat, verbose = TRUE)),
"removed 4 features"
)
expect_message(
suppressWarnings(dfm(toks, remove = feat, verbose = TRUE)),
"removed 4 features"
)
expect_message(
suppressWarnings(dfm(dfmt, remove = feat, verbose = TRUE)),
"removed 4 features"
)
})
# stemming respects the language_stemmer option (English vs French);
# temporarily changes global quanteda options and resets them at the end.
test_that("dfm works with stem options", {
txt_english <- "running ran runs"
txt_french <- "courant courir cours"
quanteda_options(language_stemmer = "english")
expect_equal(
as.character(tokens_wordstem(tokens(txt_english))),
c("run", "ran", "run")
)
expect_equal(
featnames(dfm(tokens(txt_english))),
c("running", "ran", "runs")
)
expect_equal(
featnames(suppressWarnings(dfm(tokens(txt_english), stem = TRUE))),
c("run", "ran")
)
expect_error(
suppressWarnings(dfm(tokens(txt_english), stem = c(TRUE, FALSE))),
"The length of stem must be 1"
)
quanteda_options(language_stemmer = "french")
expect_equal(
as.character(tokens_wordstem(tokens(txt_french))),
rep("cour", 3)
)
expect_equal(
featnames(dfm(tokens(txt_french))),
c("courant", "courir", "cours")
)
expect_equal(
featnames(suppressWarnings(dfm(tokens(txt_french), stem = TRUE))),
"cour"
)
quanteda_options(reset = TRUE)
})
# the verbose message names the input class the dfm was built from.
test_that("dfm verbose option prints correctly", {
txt <- c(d1 = "a b c d e", d2 = "a a b c c c")
corp <- corpus(txt)
toks <- tokens(txt)
mydfm <- dfm(toks)
expect_message(suppressWarnings(dfm(txt, verbose = TRUE)), "Creating a dfm from a character input")
expect_message(suppressWarnings(dfm(corp, verbose = TRUE)), "Creating a dfm from a corpus input")
expect_message(dfm(toks, verbose = TRUE), "Creating a dfm from a tokens input")
expect_message(dfm(mydfm, verbose = TRUE), "Creating a dfm from a dfm input")
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference); restored a closed header so the file parses.
# dfm must work when mapped over lists of inputs via purrr::map.
test_that("dfm works with purrr::map", {
skip_if_not_installed("purrr")
a <- "a b"
b <- "a a a b b"
suppressWarnings(expect_identical(
vapply(purrr::map(list(a, b), dfm), is.dfm, logical(1)),
c(TRUE, TRUE)
))
suppressWarnings(expect_identical(
vapply(purrr::map(list(corpus(a), corpus(b)), dfm), is.dfm, logical(1)),
c(TRUE, TRUE)
))
expect_identical(
vapply(purrr::map(list(tokens(a), tokens(b)), dfm), is.dfm, logical(1)),
c(TRUE, TRUE)
)
expect_identical(
vapply(purrr::map(list(dfm(tokens(a)), dfm(tokens(b))), dfm), is.dfm, logical(1)),
c(TRUE, TRUE)
)
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference); restored a closed header so the file parses.
# dfm_match/cbind behave correctly when new feature columns are created.
test_that("dfm works when features are created", {
dfm1 <- as.dfm(matrix(1:6, nrow = 2,
dimnames = list(c("doc1", "doc2"), c("a", "b", "c"))))
dfm2 <- as.dfm(matrix(1:6, nrow = 2,
dimnames = list(c("doc1", "doc2"), c("b", "c", "feat_2"))))
expect_equal(
as.matrix(dfm_match(dfm1, featnames(dfm2))),
matrix(c(3, 4, 5, 6, 0, 0), nrow = 2,
dimnames = list(docs = c("doc1", "doc2"), features = c("b", "c", "feat_2")))
)
expect_equal(
as.matrix(cbind(dfm(tokens("a b")), dfm(tokens("feat_1")))),
matrix(c(1, 1, 1), nrow = 1, dimnames = list(docs = "text1", features = c("a", "b", "feat_1")))
)
})
# unknown arguments produce a warning naming them, for all input types.
test_that("dfm warns of argument not used", {
txt <- c(d1 = "a b c d e", d2 = "a a b c c c")
corp <- corpus(txt)
toks <- tokens(txt)
mx <- dfm(toks)
expect_warning(dfm(txt, xxxxx = "something", yyyyy = "else"),
"^xxxxx, yyyyy arguments are not used")
expect_warning(dfm(corp, xxxxx = "something", yyyyy = "else"),
"^xxxxx, yyyyy arguments are not used")
expect_warning(dfm(toks, xxxxx = "something", yyyyy = "else"),
"^xxxxx, yyyyy arguments are not used")
expect_warning(dfm(mx, xxxxx = "something", yyyyy = "else"),
"^xxxxx, yyyyy arguments are not used")
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference); restored a closed header so the file parses.
# tokenization arguments given to dfm() are forwarded to tokens().
test_that("dfm pass arguments to tokens", {
txt <- data_char_sampletext
corp <- corpus(txt)
suppressWarnings({
expect_equal(dfm(txt, what = "character"),
dfm(tokens(corp, what = "character")))
expect_equivalent(dfm(txt, what = "character"),
dfm(tokens(txt, what = "character")))
expect_equal(dfm(txt, remove_punct = TRUE),
dfm(tokens(corp, remove_punct = TRUE)))
expect_equivalent(dfm(txt, remove_punct = TRUE),
dfm(tokens(txt, remove_punct = TRUE)))
})
})
# Fix: the test description string was truncated in the source (likely a
# stripped issue reference); restored a closed header so the file parses.
# A dfm may not be used as a selection pattern for non-dfm inputs.
test_that("dfm error when a dfm is given for feature selection when x is not a dfm", {
txt <- c(d1 = "a b c d e", d2 = "a a b c c c")
corp <- corpus(txt)
toks <- tokens(txt)
mx <- dfm(toks)
mx2 <- dfm(tokens(c("a b", "c")))
expect_error(suppressWarnings(dfm(txt, select = mx2)),
"dfm cannot be used as pattern")
expect_error(suppressWarnings(dfm(corp, select = mx2)),
"dfm cannot be used as pattern")
expect_error(suppressWarnings(dfm(toks, select = mx2)),
"dfm cannot be used as pattern")
expect_error(suppressWarnings(dfm(mx, select = mx2)),
"dfm cannot be used as pattern; use 'dfm_match' instead")
})
# topfeatures: count and docfreq schemes, grouped output, and the error
# raised when a non-numeric n is supplied positionally.
test_that("test topfeatures", {
expect_identical(
topfeatures(dfm(tokens("a a a a b b b c c d")), scheme = "count"),
c(a = 4, b = 3, c = 2, d = 1)
)
expect_error(
topfeatures(dfm(tokens("a a a a b b b c c d")), "count"),
"n must be a number"
)
dfmat <- corpus(c("a b b c", "b d", "b c"),
docvars = data.frame(numdv = c(1, 2, 1))) %>%
tokens() %>%
dfm()
expect_identical(
topfeatures(dfmat, groups = numdv),
list("1" = c(b = 3, c = 2, a = 1, d = 0),
"2" = c(b = 1, d = 1, a = 0, c = 0))
)
expect_identical(
topfeatures(dfmat, scheme = "docfreq"),
c(b = 3L, c = 2L, a = 1L, d = 1L)
)
expect_identical(
topfeatures(dfm_weight(dfmat, scheme = "prop"), groups = numdv),
list("1" = c(b = 1.00, c = 0.75, a = 0.25, d = 0.00),
"2" = c(b = 0.5, d = 0.5, a = 0, c = 0))
)
})
test_that("test sparsity", {
expect_equal(
sparsity(dfm(tokens(c("a a a a c c d", "b b b")))),
0.5
)
})
# A "null dfm" (0 documents x 0 features) must pass unchanged through
# every dfm_*() transformation, and summary helpers must return empty
# results rather than erroring.
test_that("test null dfm is handled properly", {
    mx <- quanteda:::make_null_dfm()
    expect_equal(dfm(mx), mx)
    expect_equal(dfm_select(mx), mx)
    expect_equal(dfm_select(mx, "a"), mx)
    expect_equal(dfm_trim(mx), mx)
    expect_equal(dfm_sample(mx), mx)
    expect_equal(dfm_subset(mx), mx)
    expect_equal(dfm_compress(mx, "both"), mx)
    expect_equal(dfm_compress(mx, "features"), mx)
    expect_equal(dfm_compress(mx, "documents"), mx)
    expect_equal(dfm_sort(mx, margin = "both"), mx)
    expect_equal(dfm_sort(mx, margin = "features"), mx)
    expect_equal(dfm_sort(mx, margin = "documents"), mx)
    expect_equal(dfm_lookup(mx, dictionary(list(A = "a"))), mx)
    expect_equal(dfm_group(mx), mx)
    expect_equal(dfm_replace(mx, "A", "a"), mx)
    expect_equal(head(mx), mx)
    expect_equal(tail(mx), mx)
    expect_equal(topfeatures(mx), numeric())
    expect_equal(dfm_weight(mx, "count"), mx)
    expect_equal(dfm_weight(mx, "prop"), mx)
    expect_equal(dfm_weight(mx, "propmax"), mx)
    expect_equal(dfm_weight(mx, "logcount"), mx)
    expect_equal(dfm_weight(mx), mx)
    expect_equal(dfm_weight(mx, "augmented"), mx)
    expect_equal(dfm_weight(mx, "boolean"), mx)
    expect_equal(dfm_weight(mx, "logave"), mx)
    expect_equal(dfm_tfidf(mx), mx)
    expect_equal(docfreq(mx), numeric())
    expect_equal(dfm_smooth(mx), mx)
    expect_equal(dfm_tolower(mx), mx)
    expect_equal(dfm_toupper(mx), mx)
    expect_equal(dfm_wordstem(mx), mx)
    expect_equal(rbind(mx, mx), mx)
    expect_equal(cbind(mx, mx), mx)
    expect_output(print(mx),
                  "Document-feature matrix of: 0 documents, 0 features (0.00% sparse) and 0 docvars.", fixed = TRUE)
})
# Restored the truncated (unterminated) description string from the source.
# An "empty" dfm here has documents (and docvars) but zero features.
test_that("test empty dfm is handled properly", {
    mx <- dfm_trim(data_dfm_lbgexample, 1000)
    docvars(mx) <- data.frame(var = c(1, 5, 3, 6, 6, 4))
    expect_equal(dfm(mx), mx)
    expect_equal(dfm_select(mx), mx)
    expect_equal(dfm_select(mx, "a"), mx)
    expect_equal(dfm_trim(mx), mx)
    expect_equal(ndoc(dfm_sample(mx)), ndoc(mx))
    expect_equal(dfm_subset(mx, var > 5), mx[4:5, ])
    expect_equal(dfm_compress(mx, "both"), mx)
    expect_equal(dfm_compress(mx, "features"), mx)
    expect_equal(dfm_compress(mx, "documents"), mx)
    expect_equal(dfm_sort(mx, margin = "both"), mx)
    expect_equal(dfm_sort(mx, margin = "features"), mx)
    expect_equal(dfm_sort(mx, margin = "documents"), mx)
    expect_equal(dfm_lookup(mx, dictionary(list(A = "a"))), mx)
    expect_equal(dfm_group(mx), mx)
    expect_equal(dfm_replace(mx, "A", "a"), mx)
    expect_equal(head(mx), mx)
    expect_equal(tail(mx), mx)
    expect_equal(topfeatures(mx), numeric())
    expect_equal(dfm_weight(mx, "count"), mx)
    expect_equal(dfm_weight(mx, "prop"), mx)
    expect_equal(dfm_weight(mx, "propmax"), mx)
    expect_equal(dfm_weight(mx, "logcount"), mx)
    expect_equal(dfm_weight(mx), mx)
    expect_equal(dfm_weight(mx, "augmented"), mx)
    expect_equal(dfm_weight(mx, "boolean"), mx)
    expect_equal(dfm_weight(mx, "logave"), mx)
    expect_equal(dfm_tfidf(mx), mx)
    expect_equal(docfreq(mx), numeric())
    expect_equal(dfm_smooth(mx), mx)
    expect_equal(dfm_tolower(mx), mx)
    expect_equal(dfm_toupper(mx), mx)
    expect_equal(dfm_wordstem(mx), mx)
    expect_equal(ndoc(rbind(mx, mx)), ndoc(mx) * 2)
    expect_equal(ndoc(cbind(mx, mx)), ndoc(mx))
    expect_output(print(mx),
                  "Document-feature matrix of: 6 documents, 0 features (0.00% sparse) and 1 docvar.", fixed = TRUE)
})
# Restored the truncated (unterminated) description string from the source.
# All out-of-bounds document/feature subscripts must give the same error.
test_that("dfm raises nicer error message on out-of-bounds indexing", {
    txt <- c(d1 = "one two three", d2 = "two three four", d3 = "one three four")
    mx <- dfm(tokens(txt))
    # document subscripts
    expect_error(mx["d4"], "Subscript out of bounds")
    expect_error(mx["d4", ], "Subscript out of bounds")
    expect_error(mx[4], "Subscript out of bounds")
    expect_error(mx[4, ], "Subscript out of bounds")
    expect_error(mx["d4", , TRUE], "Subscript out of bounds")
    expect_error(mx[4, , TRUE], "Subscript out of bounds")
    expect_error(mx[1:4, , TRUE], "Subscript out of bounds")
    expect_error(mx[1:4, , TRUE], "Subscript out of bounds")
    # feature subscripts
    expect_error(mx["five"], "Subscript out of bounds")
    expect_error(mx[, "five"], "Subscript out of bounds")
    expect_error(mx[5], "Subscript out of bounds")
    expect_error(mx[, 5], "Subscript out of bounds")
    expect_error(mx[, 1:5], "Subscript out of bounds")
    expect_error(mx["d4", "five"], "Subscript out of bounds")
    expect_error(mx[, "five", TRUE], "Subscript out of bounds")
    expect_error(mx[, 5, TRUE], "Subscript out of bounds")
    expect_error(mx[, 1:5, TRUE], "Subscript out of bounds")
    expect_error(mx["d4", "five", TRUE], "Subscript out of bounds")
    # both margins at once
    expect_error(mx[4, 5], "Subscript out of bounds")
    expect_error(mx[4:5], "Subscript out of bounds")
    expect_error(mx[1:4, 1:5], "Subscript out of bounds")
    expect_error(mx[4, 5, TRUE], "Subscript out of bounds")
    expect_error(mx[1:4, 1:5, TRUE], "Subscript out of bounds")
})
# Restored the truncated (unterminated) description string from the source.
# Dictionary keys with no matches ("Z") must still appear as features.
test_that("dfm keeps non-existent types", {
    toks <- tokens("a b c")
    dict <- dictionary(list(A = "a", B = "b", Z = "z"))
    toks_key <- tokens_lookup(toks, dict)
    expect_equal(types(toks_key), c("A", "B", "Z"))
    expect_equal(featnames(dfm(toks_key, tolower = TRUE)),
                 c("a", "b", "z"))
    expect_equal(featnames(dfm(toks_key, tolower = FALSE)),
                 c("A", "B", "Z"))
})
test_that("arithmetic/linear operation works with dfm", {
    mt <- dfm(tokens(c(d1 = "a a b", d2 = "a b b c", d3 = "c c d")))
    # every binary arithmetic operator must return a dfm with the dfm on
    # either side of the scalar
    for (op in list(`+`, `-`, `*`, `/`, `^`)) {
        expect_true(is.dfm(op(mt, 2)))
        expect_true(is.dfm(op(2, mt)))
    }
    # transposition also preserves the dfm class and the margin sums
    expect_true(is.dfm(t(mt)))
    expect_equal(rowSums(mt), colSums(t(mt)))
})
# Fixed typo in the test description ("wokrs" -> "work").
# Binding a null dfm must be a no-op for document names.
test_that("rbind and cbind work with empty dfm", {
    mt <- dfm(tokens(c(d1 = "a a b", d2 = "a b b c", d3 = "c c d")))
    expect_identical(docnames(rbind(mt, quanteda:::make_null_dfm())),
                     docnames(mt))
    expect_identical(docnames(mt),
                     docnames(rbind(mt, quanteda:::make_null_dfm())))
    expect_identical(docnames(cbind(mt, quanteda:::make_null_dfm())),
                     docnames(mt))
    expect_identical(docnames(mt),
                     docnames(cbind(mt, quanteda:::make_null_dfm())))
})
# format_sparsity() renders a sparsity proportion as a percentage string,
# clamping extreme values to "<0.01%" / ">99.99%".
test_that("format_sparsity works correctly", {
    expect_error(
        quanteda:::format_sparsity(-1),
        "The value of x must be between 0 and 1"
    )
    expect_identical(
        quanteda:::format_sparsity(sparsity(as.dfm(Matrix::rsparsematrix(1000, 1000, density = 0.5)))),
        "50.00%"
    )
    expect_identical(
        quanteda:::format_sparsity(sparsity(as.dfm(Matrix::rsparsematrix(1000, 1000, density = 0.1)))),
        "90.00%"
    )
    expect_identical(
        quanteda:::format_sparsity(sparsity(as.dfm(Matrix::rsparsematrix(1000, 1000, density = 0.99)))),
        "1.00%"
    )
    expect_identical(quanteda:::format_sparsity(1.0), "100.00%")
    expect_identical(quanteda:::format_sparsity(0.9999), "99.99%")
    expect_identical(quanteda:::format_sparsity(0.99991), ">99.99%")
    expect_identical(quanteda:::format_sparsity(0.0001), "0.01%")
    expect_identical(quanteda:::format_sparsity(0.00001), "<0.01%")
    expect_identical(quanteda:::format_sparsity(0.00011), "0.01%")
    expect_identical(quanteda:::format_sparsity(0.0), "0.00%")
    # NA sparsity is displayed as fully dense
    expect_identical(quanteda:::format_sparsity(NA), "0.00%")
})
# Restored the truncated (unterminated) description string from the source.
# The unused-argument warning must fire exactly once per call, with all
# unused argument names collected into a single message.
test_that("unused argument warning only happens once", {
    expect_warning(
        dfm(tokens("some text"), NOTARG = TRUE),
        "^NOTARG argument is not used\\.$"
    )
    expect_warning(
        dfm(corpus("some text"), NOTARG = TRUE),
        "^NOTARG argument is not used\\.$"
    )
    expect_warning(
        dfm(tokens("some text"), NOTARG = TRUE),
        "^NOTARG argument is not used\\.$"
    )
    expect_warning(
        dfm(tokens("some text"), NOTARG = TRUE, NOTARG2 = FALSE),
        "^NOTARG, NOTARG2 arguments are not used\\.$"
    )
})
test_that("dfm.tokens() with groups works as expected", {
    x <- tokens(data_corpus_inaugural)
    # the deprecated `groups` argument still collapses documents by group
    groupeddfm <- suppressWarnings(dfm(tokens(x),
                                       groups = c("FF", "FF", rep("non-FF", ndoc(x) - 2))))
    expect_equal(ndoc(groupeddfm), 2)
    expect_equal(docnames(groupeddfm), c("FF", "non-FF"))
    expect_equal(featnames(groupeddfm), featnames(dfm(x)))
})
# Subsetting to zero rows/columns must yield character() dimnames, never NULL.
test_that("dimnames are always character vectors", {
    mt <- data_dfm_lbgexample
    expect_identical(dimnames(mt[, character()]),
                     list(docs = rownames(mt), features = character()))
    expect_identical(dimnames(mt[, FALSE]),
                     list(docs = rownames(mt), features = character()))
    expect_identical(dimnames(mt[character(), ]),
                     list(docs = character(), features = colnames(mt)))
    expect_identical(dimnames(mt[FALSE, ]),
                     list(docs = character(), features = colnames(mt)))
})
# Internal replacement functions for dfm dimnames.
test_that("set_dfm_dimnames etc functions work", {
    x <- dfm(tokens(c("a a b b c", "b b b c")))
    quanteda:::set_dfm_featnames(x) <- paste0("feature", 1:3)
    expect_identical(featnames(x), c("feature1", "feature2", "feature3"))
    quanteda:::set_dfm_docnames(x) <- paste0("DOC", 1:2)
    expect_identical(docnames(x), c("DOC1", "DOC2"))
    quanteda:::set_dfm_dimnames(x) <- list(c("docA", "docB"), LETTERS[1:3])
    expect_identical(docnames(x), c("docA", "docB"))
    expect_identical(featnames(x), c("A", "B", "C"))
})
# Dimnames must keep their UTF-8 encoding through sort/group/remove/trim.
test_that("dfm feature and document names have encoding", {
    mt <- dfm(tokens(c("文書1" = "あ い い う", "文書2" = "え え え お")))
    expect_true(all(Encoding(colnames(mt)) == "UTF-8"))
    mt1 <- dfm_sort(mt)
    expect_true(all(Encoding(colnames(mt1)) == "UTF-8"))
    mt2 <- dfm_group(mt, c("文書3", "文書3"))
    expect_true(all(Encoding(colnames(mt2)) == "UTF-8"))
    mt3 <- dfm_remove(mt, c("あ"))
    expect_true(all(Encoding(colnames(mt3)) == "UTF-8"))
    mt4 <- dfm_trim(mt, min_termfreq = 2)
    expect_true(all(Encoding(colnames(mt4)) == "UTF-8"))
})
# Progress messages emitted by dfm(verbose = TRUE) for each processing
# stage (construction, dictionary lookup, grouping, stemming).
test_that("dfm verbose = TRUE works as expected", {
    expect_message(
        tmp <- suppressWarnings(dfm(data_corpus_inaugural[1:3], verbose = TRUE)),
        "Creating a dfm from a corpus input"
    )
    expect_message(
        tmp <- dfm(tokens(data_corpus_inaugural[1:3]), verbose = TRUE),
        "Finished constructing a 3 x 1,\\d{3} sparse dfm"
    )
    dict <- dictionary(list(pos = "good", neg = "bad", neg_pos = "not good", neg_neg = "not bad"))
    expect_message(
        tmp <- suppressWarnings(dfm(tokens(data_corpus_inaugural[1:3]), dictionary = dict, verbose = TRUE)),
        "applying a dictionary consisting of 4 keys"
    )
    expect_message(
        tmp <- suppressWarnings(dfm(dfm(tokens(data_corpus_inaugural[1:3])), dictionary = dict, verbose = TRUE)),
        "applying a dictionary consisting of 4 keys"
    )
    expect_message(
        tmp <- suppressWarnings(dfm(tokens(data_corpus_inaugural[1:3]),
                                    groups = data_corpus_inaugural$President[1:3],
                                    verbose = TRUE)),
        "grouping texts"
    )
    expect_message(
        tmp <- suppressWarnings(dfm(tokens(data_corpus_inaugural[1:2]), stem = TRUE, verbose = TRUE)),
        "stemming types \\(English\\)"
    )
    expect_message(
        tmp <- suppressWarnings(dfm(dfm(tokens(data_corpus_inaugural[1:2])), stem = TRUE, verbose = TRUE)),
        "stemming features \\(English\\)"
    )
    expect_message(
        tmp <- suppressWarnings(dfm(dfm(tokens(data_corpus_inaugural[1:3])),
                                    groups = data_corpus_inaugural$President[1:3],
                                    verbose = TRUE)),
        "grouping texts"
    )
    # select and remove are mutually exclusive
    expect_error(
        dfm(tokens("one two three"), remove = "one", select = "three"),
        "only one of select and remove may be supplied at once"
    )
    # an NA token type must not crash dfm construction
    toks <- tokens(c("one two", "two three four"))
    attributes(toks)$types[4] <- NA
    dfm(toks)
})
test_that("dfm_sort works as expected", {
    dfmat <- dfm(tokens(c(d1 = "z z x y a b", d3 = "x y y y c", d2 = "a z")))
    expect_identical(
        featnames(dfm_sort(dfmat, margin = "features", decreasing = TRUE)),
        c("y", "z", "x", "a", "b", "c")
    )
    expect_identical(
        featnames(dfm_sort(dfmat, margin = "features", decreasing = FALSE)),
        c("b", "c", "x", "a", "z", "y")
    )
    expect_identical(
        docnames(dfm_sort(dfmat, margin = "documents", decreasing = TRUE)),
        c("d1", "d3", "d2")
    )
    expect_identical(
        docnames(dfm_sort(dfmat, margin = "documents", decreasing = FALSE)),
        rev(c("d1", "d3", "d2"))
    )
})
# Restored the truncated (unterminated) description string from the source.
# t(dfm) must swap the dimname roles and carry features over as the new
# document metadata.
test_that("test dfm transpose", {
    dfmat <- dfm(tokens(c(d1 = "one two three", d2 = "two two three")))
    dfmat_t <- t(dfmat)
    expect_equal(
        names(dimnames(dfmat_t)),
        c("features", "docs")
    )
    expect_equal(
        docnames(dfmat_t),
        c("one", "two", "three")
    )
    expect_equal(
        dfmat_t@docvars$docname_,
        c("one", "two", "three")
    )
    expect_equal(
        names(dfmat_t@meta),
        c("system", "object", "user")
    )
})
test_that("dfm deprecations work as expected", {
    txt <- c("a a b b c", "a a b c c d d")
    corp <- corpus(txt)
    toks <- tokens(corp)
    dfmat <- dfm(toks)
    # direct character/corpus input is itself deprecated
    expect_warning(
        dfm(txt),
        "'dfm.character()' is deprecated. Use 'tokens()' first.",
        fixed = TRUE
    )
    expect_warning(
        dfm(corp),
        "'dfm.corpus()' is deprecated. Use 'tokens()' first.",
        fixed = TRUE
    )
    # each deprecated processing argument emits the same warning message
    # regardless of input class: character, corpus, tokens, or dfm
    for (input in list(txt, corp, toks, dfmat)) {
        expect_warning(
            dfm(input, stem = TRUE),
            "'stem' is deprecated; use dfm_wordstem() instead",
            fixed = TRUE
        )
        expect_warning(
            dfm(input, select = "a"),
            "'select' is deprecated; use dfm_select() instead",
            fixed = TRUE
        )
        expect_warning(
            dfm(input, remove = "a"),
            "'remove' is deprecated; use dfm_remove() instead",
            fixed = TRUE
        )
        expect_warning(
            dfm(input, dictionary = dictionary(list(one = "b"))),
            "'dictionary' and 'thesaurus' are deprecated; use dfm_lookup() instead",
            fixed = TRUE
        )
        expect_warning(
            dfm(input, groups = c(1, 1)),
            "'groups' is deprecated; use dfm_group() instead",
            fixed = TRUE
        )
        expect_warning(
            dfm(input, remove = "a", valuetype = "regex"),
            "valuetype is deprecated in dfm()", fixed = TRUE
        )
        expect_warning(
            dfm(input, remove = "a", case_insensitive = FALSE),
            "case_insensitive is deprecated in dfm()", fixed = TRUE
        )
    }
})
test_that("valuetype and case_insensitive are still working", {
    txt <- c("a a b b c", "A A b C C d d")
    corp <- corpus(txt)
    toks <- tokens(corp)
    dfmat <- dfm(toks, tolower = FALSE)
    # identical regex selection behavior for character, corpus, and dfm
    # inputs: case-insensitive by default, literal case when disabled
    for (input in list(txt, corp, dfmat)) {
        expect_identical(
            featnames(suppressWarnings(dfm(input, tolower = FALSE, remove = "a|c",
                                           valuetype = "regex"))),
            c("b", "d")
        )
        expect_identical(
            featnames(suppressWarnings(dfm(input, tolower = FALSE, remove = "a|c",
                                           valuetype = "regex", case_insensitive = TRUE))),
            c("b", "d")
        )
        expect_identical(
            featnames(suppressWarnings(dfm(input, tolower = FALSE, remove = "a|c",
                                           valuetype = "regex", case_insensitive = FALSE))),
            c("b", "A", "C", "d")
        )
    }
})
# remove_padding controls whether the "" pad feature (left by
# tokens_remove(..., padding = TRUE)) is kept in the resulting dfm.
test_that("remove_padding argument works", {
    txt <- c("a a b b c", "a a b c c d d")
    toks <- tokens(txt) %>% tokens_remove("b", padding = TRUE)
    dfmat <- dfm(toks)
    # unpadded character input is unaffected by the argument
    expect_identical(
        featnames(suppressWarnings(dfm(txt, remove_padding = TRUE))),
        c("a", "b", "c", "d")
    )
    expect_identical(
        featnames(suppressWarnings(dfm(txt, remove_padding = FALSE))),
        c("a", "b", "c", "d")
    )
    expect_identical(
        featnames(dfm(toks, remove_padding = FALSE)),
        c("", "a", "c", "d")
    )
    expect_identical(
        featnames(dfm(dfmat, remove_padding = TRUE)),
        c("a", "c", "d")
    )
    expect_identical(
        featnames(dfm(dfmat, remove_padding = FALSE)),
        c("", "a", "c", "d")
    )
})
# Restored the truncated (unterminated) description string from the source.
# Feature order must be deterministic regardless of type-table order.
test_that("features of a dfm are always in the same order", {
    toks1 <- quanteda:::build_tokens(list(c(1, 0, 2, 3, 4)), types = c("a", "b", "c", "d"),
                                     padding = TRUE,
                                     docvars = quanteda:::make_docvars(1L))
    toks2 <- quanteda:::build_tokens(list(c(1, 0, 3, 2, 4)), types = c("a", "c", "b", "d"),
                                     padding = TRUE,
                                     docvars = quanteda:::make_docvars(1L))
    toks3 <- quanteda:::build_tokens(list(c(1, 2, 3, 4)), types = c("a", "b", "c", "d"),
                                     padding = FALSE,
                                     docvars = quanteda:::make_docvars(1L))
    dfmat1 <- dfm(toks1)
    dfmat2 <- dfm(toks2)
    dfmat3 <- dfm(toks3)
    # the pad feature "" always comes first when padding is present
    expect_identical(c("", "a", "b", "c", "d"), featnames(dfmat1))
    expect_identical(c("", "a", "b", "c", "d"), featnames(dfmat2))
    expect_identical(c("a", "b", "c", "d"), featnames(dfmat3))
})
#' Add a G(n, p) Erdos-Renyi random graph to an existing graph object.
#'
#' @param graph A valid graph object.
#' @param n Number of nodes to generate (must be >= 1).
#' @param p Probability of an edge between any pair of nodes.
#' @param loops Whether self-loops are allowed.
#' @param type,rel Optional node `type` / edge `rel` values (first element
#'   recycled to all rows).
#' @param label If TRUE, node labels are set to the node ids.
#' @param node_aes,edge_aes,node_data,edge_data Optional lists of
#'   aesthetic/data columns to bind to the generated nodes/edges.
#' @param set_seed Optional RNG seed for reproducible generation.
#' @return The input graph with the random graph's nodes/edges appended.
add_gnp_graph <- function(graph,
                          n,
                          p,
                          loops = FALSE,
                          type = NULL,
                          label = TRUE,
                          rel = NULL,
                          node_aes = NULL,
                          edge_aes = NULL,
                          node_data = NULL,
                          edge_data = NULL,
                          set_seed = NULL) {

  time_function_start <- Sys.time()
  fcn_name <- get_calling_fcn()

  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }

  if (!is.null(set_seed)) {
    set.seed(set_seed, kind = "Mersenne-Twister")
  }

  if (n <= 0) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The value for `n` must be at least 1")
  }

  # Normalize an aes/data spec into a tibble of `n_rows` rows with any
  # `id` column dropped. This replaces four near-identical copies of the
  # same logic in the original implementation.
  prepare_attr_tbl <- function(attrs, n_rows) {
    attr_tbl <- dplyr::as_tibble(attrs)
    if (nrow(attr_tbl) < n_rows) {
      # adding a full-length index column makes as_tibble() recycle the
      # shorter columns to `n_rows`; the index is then discarded
      attrs$index__ <- seq_len(n_rows)
      attr_tbl <-
        dplyr::as_tibble(attrs) %>%
        dplyr::select(-index__)
    }
    if ("id" %in% colnames(attr_tbl)) {
      attr_tbl <-
        attr_tbl %>%
        dplyr::select(-id)
    }
    attr_tbl
  }

  # preserve state of the incoming graph before merging
  nodes_created <- graph$last_node
  edges_created <- graph$last_edge
  global_attrs <- graph$global_attrs
  graph_log <- graph$graph_log
  graph_info <- graph$graph_info
  graph_directed <- graph$directed

  # generate the random graph with igraph and convert it
  sample_gnp_igraph <-
    igraph::sample_gnp(
      n = n,
      p = p,
      directed = graph_directed,
      loops = loops)

  sample_gnp_graph <- from_igraph(sample_gnp_igraph)

  if (!is.null(type)) {
    sample_gnp_graph$nodes_df$type <- as.character(type[1])
  }

  if (!is.null(rel)) {
    sample_gnp_graph$edges_df$rel <- as.character(rel[1])
  }

  if (label == TRUE) {
    sample_gnp_graph$nodes_df$label <-
      sample_gnp_graph$nodes_df$id %>% as.character()
  }

  n_nodes <- nrow(sample_gnp_graph$nodes_df)
  n_edges <- nrow(sample_gnp_graph$edges_df)

  # bind any supplied aesthetic/data columns to the new nodes and edges
  if (!is.null(node_aes)) {
    sample_gnp_graph$nodes_df <-
      sample_gnp_graph$nodes_df %>%
      dplyr::bind_cols(prepare_attr_tbl(node_aes, n_nodes))
  }

  if (!is.null(node_data)) {
    sample_gnp_graph$nodes_df <-
      sample_gnp_graph$nodes_df %>%
      dplyr::bind_cols(prepare_attr_tbl(node_data, n_nodes))
  }

  if (!is.null(edge_aes)) {
    sample_gnp_graph$edges_df <-
      sample_gnp_graph$edges_df %>%
      dplyr::bind_cols(prepare_attr_tbl(edge_aes, n_edges))
  }

  if (!is.null(edge_data)) {
    sample_gnp_graph$edges_df <-
      sample_gnp_graph$edges_df %>%
      dplyr::bind_cols(prepare_attr_tbl(edge_data, n_edges))
  }

  # merge into the existing graph (or replace it when empty)
  if (!is_graph_empty(graph)) {
    graph <- combine_graphs(graph, sample_gnp_graph)
  } else {
    graph <- sample_gnp_graph
  }

  graph$last_node <- nodes_created + n_nodes
  graph$last_edge <- edges_created + n_edges

  graph_log <-
    add_action_to_log(
      graph_log = graph_log,
      version_id = nrow(graph_log) + 1,
      function_used = fcn_name,
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df),
      d_n = n_nodes,
      d_e = n_edges)

  graph$global_attrs <- global_attrs
  graph$graph_log <- graph_log
  graph$graph_info <- graph_info

  # re-run any registered graph actions on the updated graph
  if (nrow(graph$graph_actions) > 0) {
    graph <-
      graph %>%
      trigger_graph_actions()
  }

  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }

  graph
}
# Build a data.frame of hatchling-metric Mean/SD values, one row per
# series name, optionally appending to a previously built table.
#
# Args:
#   series            a NestsResult/Nests object, or a character vector of
#                     series names
#   hatchling.metric  named vector with "Mean" and "SD" entries (recycled
#                     across series); taken from `series` when NULL and
#                     `series` is a NestsResult
#   previous          an existing table to rbind() the new rows onto
#
# Returns a data.frame with columns Mean and SD and row names = series,
# or an error-message string when hatchling.metric is malformed.
Generate_hatchling_metric <-
  function(series=stop("A result object or names of series must be provided"), hatchling.metric=NULL, previous=NULL) {
    if (is.null(hatchling.metric) && !inherits(series, "NestsResult")) {
      stop("hatchling.metric or a result from searchR() must be provided")
    }
    if (inherits(series, "NestsResult")) {
      # Fix: the original assigned series$hatchling.metric to an unused
      # variable `testec`, leaving hatchling.metric NULL and crashing below.
      if (is.null(hatchling.metric)) hatchling.metric <- series$hatchling.metric
      series <- series$data
    }
    if (inherits(series, "Nests")) {
      # Fix: read the timeseries count BEFORE overwriting `series` with its
      # names (the original applied `$IndiceT` to a character vector).
      nbts <- series$IndiceT["NbTS"]
      series <- names(series)[seq_len(nbts)]
    }
    if (!is.null(hatchling.metric)) {
      # Fix: the original compared length(Mean) against itself; compare
      # Mean against SD as intended.
      if (is.na(hatchling.metric["Mean"]) || is.na(hatchling.metric["SD"]) ||
          length(hatchling.metric["Mean"]) != length(hatchling.metric["SD"])) {
        return("hatchling.metric must be a vector with same number of Mean and SD values")
      } else {
        # recycle the supplied values across all series
        mean_values <- rep(hatchling.metric["Mean"], length(series))[seq_along(series)]
        sd_values <- rep(hatchling.metric["SD"], length(series))[seq_along(series)]
        hatchling.metric_ec <- data.frame(Mean=mean_values, SD=sd_values, row.names=series)
      }
    }
    if (!is.null(previous)) {
      hatchling.metric <- rbind(previous, hatchling.metric_ec)
    } else {
      hatchling.metric <- hatchling.metric_ec
    }
    return(hatchling.metric)
  }
# Binarize a numeric vector via 2-means clustering: values in the cluster
# with the larger center map to 1, the others to 0. Optionally computes a
# dip-test p-value for unimodality of the input.
#
# Args:
#   vect     numeric vector to binarize (>= 3 entries, non-constant, finite)
#   nstart   number of random starts passed to kmeans()
#   iter.max maximum number of kmeans iterations
#   dip.test if TRUE, run Hartigan's dip test on the input
#   na.rm    if TRUE, drop NA values; if FALSE, NAs raise an error
#
# Returns a "BinarizationResult" S4 object (class defined elsewhere in
# this package).
binarize.kMeans <- function(vect, nstart=1, iter.max=10, dip.test=TRUE, na.rm=FALSE){
    # ---- input validation ----
    if(!is.numeric(vect))
        stop("The input vector must consist of numerical values!")
    if (!na.rm && any(is.na(vect)))
        stop("Cannot binarize in the presence of NA values!")
    else
    if (na.rm)
    {
        vect <- vect[!is.na(vect)]
    }
    if (any(!is.finite(vect)))
        stop("Cannot binarize Inf values!")
    if(!is.numeric(nstart))
        stop("'nstart' must be numeric!")
    if(nstart < 0)
        stop("'nstart' must be >= 0!")
    if(!is.numeric(iter.max))
        stop("'iter.max' must be numeric!")
    if(iter.max < 0)
        stop("'iter.max' must be >= 0!")
    if(length(vect) < 3)
        stop("The input vector must have at least 3 entries!")
    if(length(unique(vect))==1)
        stop("The input vector is constant!")
    if (dip.test)
    {
        # NOTE(review): this calls the dip.test() *function* (presumably from
        # the diptest package) even though the logical argument shares its
        # name -- R resolves a call position to a function binding, so the
        # argument does not shadow it. Confirm diptest is a dependency.
        p.value <- dip.test(vect)$p.value
    }
    else
        p.value <- as.numeric(NA)
    # two-cluster k-means on the raw values
    km_res <- kmeans(vect, 2, nstart = nstart, iter.max = iter.max)
    # relabel so that cluster 0 is always the low-center cluster
    if(km_res$centers[1] > km_res$centers[2]){
        binarizeddata <- abs(km_res$cluster - 2)
    }
    else{
        binarizeddata <- km_res$cluster - 1
    }
    # threshold = midpoint between the largest 0-value and smallest 1-value
    threshold <- (max(vect[!as.logical(binarizeddata)]) + min(vect[as.logical(binarizeddata)])) / 2
    return(new("BinarizationResult",
               originalMeasurements = vect,
               binarizedMeasurements = as.integer(binarizeddata),
               threshold = threshold,
               p.value = p.value,
               method = "k-Means"))
}
# Evaluate the linear opinion pool (weighted average) of the fitted
# expert densities on a common grid.
#
# Args:
#   fit  an elicitation fit object with $limits (per-expert lower/upper
#        bounds) and $vals (one row per expert)
#   xl, xu  evaluation range; defaults are derived from the elicitation
#        limits, falling back to pooled 1%/99% quantiles when unbounded
#   d    distribution choice passed through to feedback()/expertdensity()
#   lpw  expert weights (scalar recycled to all experts); must be
#        non-negative with at least one positive entry
#   nx   number of grid points
#
# Returns a list with the grid `x` and the pooled density `f`.
linearPoolDensity <- function(fit, xl = -Inf, xu = Inf, d = "best", lpw = 1, nx = 200){
  # resolve the lower limit: finite elicitation bound, else 1st percentile
  if (xl == -Inf && min(fit$limits[, 1]) > -Inf) {
    xl <- min(fit$limits[, 1])
  }
  if (xl == -Inf && min(fit$limits[, 1]) == -Inf) {
    f1 <- feedback(fit, quantiles = 0.01, dist = d)
    xl <- min(f1$expert.quantiles)
  }
  # resolve the upper limit: finite elicitation bound, else 99th percentile
  if (xu == Inf && max(fit$limits[, 2]) < Inf) {
    xu <- max(fit$limits[, 2])
  }
  if (xu == Inf && max(fit$limits[, 2]) == Inf) {
    f2 <- feedback(fit, quantiles = 0.99, dist = d)
    xu <- max(f2$expert.quantiles)
  }
  n.experts <- nrow(fit$vals)
  x <- matrix(0, nx, n.experts)
  fx <- x
  if (min(lpw) < 0 || max(lpw) <= 0) {
    stop("expert weights must be non-negative, and at least one weight must be greater than 0.")
  }
  if (length(lpw) == 1) {
    lpw <- rep(lpw, n.experts)
  }
  # normalized weights replicated down the rows (was `byrow = T`)
  weight <- matrix(lpw / sum(lpw), nx, n.experts, byrow = TRUE)
  # evaluate each expert's density on the shared grid
  for (i in seq_len(n.experts)) {
    densitydata <- expertdensity(fit, d, ex = i, pl = xl, pu = xu, nx = nx)
    x[, i] <- densitydata$x
    fx[, i] <- densitydata$fx
  }
  # linear opinion pool: weighted sum across experts at each grid point
  fx.lp <- apply(fx * weight, 1, sum)
  list(x = x[, 1], f = fx.lp)
}
# S3 print method for "Gamex" objects: shows only the coefficients and
# returns NULL invisibly.
"print.Gamex" <-
  function(x, ...)
  {
    coefs <- x$coefficients
    print(coefs)
    invisible()
  }
# Draws X-bar, R, and S control charts (plus a histogram of the subgroup
# means) for a samples-by-observations matrix X, and returns the control
# limits together with the in/out-of-control sample indices.
#
# NOTE(review): depends on the `factor.a` dataset of control-chart
# constants (A2, D3, D4, B3, B4), loaded with data() into this function's
# environment -- confirm the package ships it. Messages/labels are in
# Spanish and are left untouched (runtime strings).
xrs_gr<-function(X){
    if (missing(X)){
        stop("No hay muestras para leer, No sample to read")
    } else {
        x<-X
        m<-nrow(x)               # number of subgroups (samples)
        n<-ncol(x)               # observations per subgroup
        X.prom<-apply(x,1,mean)  # subgroup means
        # subgroup range = max - min
        f.rango<-function(x){
            f.rango.p<-range(x)
            return(f.rango.p[2]-f.rango.p[1])
        }
        X.range<-apply(x,1,f.rango)
        X.S<-apply(x,1,sd)       # subgroup standard deviations
        data(factor.a, envir = environment())
        # Control limits are stored as unevaluated expressions and eval()'d
        # wherever needed; constants are indexed by subgroup size (n-1).
        LCS.X<-expression(mean(X.prom) + factor.a$A2[n-1] * mean(X.range))
        LCI.X<-expression(mean(X.prom) - factor.a$A2[n-1] * mean(X.range))
        LC.X<-expression(mean(X.prom))
        LCS.R<-expression(mean(X.range)*factor.a$D4[n-1])
        LCI.R<-expression(mean(X.range)*factor.a$D3[n-1])
        LC.R<-expression(mean(X.range))
        LCS.S<-expression(mean(X.S)*factor.a$B4[n-1])
        LCI.S<-expression(mean(X.S)*factor.a$B3[n-1])
        LC.S<-expression(mean(X.S))
        # 2x2 panel layout: histogram, X-bar chart, R chart, S chart
        mat<-matrix(1:4,2,2,byrow=TRUE)
        layout(mat)
        layout.show(length(1:4))
        hist.X<-function(x=X.prom,breaks="Sturges"){
            hist(X.prom, breaks = breaks,
                 xlab="Valores", ylab="Frecuencia",
                 main="Histograma de los promedios")
        }
        # X-bar chart with limit lines and annotated limit values
        plot.X<-function(x=X.prom,type="b",col="blue",pch =19){
            plot(x=x, xlab= "Numero de muestra", ylab="Valores de cada muestra",
                 main="Grafica X, Control Estadistico de la Calidad",type=type, col=col,
                 ylim=c(min(eval(LCI.X), min(X.prom)), max(eval(LCS.X), max(X.prom))),
                 xlim=c(-0.05*m, 1.05*m), pch = pch)
            abline(h= c(eval(LCS.X), eval(LCI.X), eval(LC.X)),col="lightgray")
            text(c(rep(1,3),rep(7,3)), rep(c(eval(LCS.X),eval(LC.X),eval(LCI.X)),2),
                 c(c("LCS = ","LC = ","LCI = "), c(round(eval(LCS.X),2),
                                                   round(eval(LC.X),2),
                                                   round(eval(LCI.X),2))), col="red")
        }
        # R (range) chart
        plot.R<-function(x=X.range,type="b",col="black",pch =15){
            plot(x=x, xlab= "Numero de muestra", ylab="Rangos de cada muestra",
                 main="Grafica R, Control Estadistico de la Calidad",type=type, col=col,
                 ylim=c(min(eval(LCI.R)-min(X.range)*0.05, min(X.range)*.95),
                        max(eval(LCS.R)+max(X.range)*0.05, max(X.range)*1.05)),
                 xlim=c(-0.05*m, 1.05*m), pch = pch)
            abline(h= c(eval(LCS.R), eval(LCI.R), eval(LC.R)), col="lightgray")
            text(c(rep(1,3),rep(7,3)), rep(c(eval(LCS.R),eval(LC.R),eval(LCI.R)),2),
                 c(c("LCS = ","LC = ","LCI = "),
                   c(round(eval(LCS.R),2), round(eval(LC.R),2),
                     round(eval(LCI.R),2))), col="red")
        }
        # S (standard deviation) chart
        plot.S<-function(x=X.S,type="b",col="gray",pch =15){
            plot(x=x, xlab= "Numero de muestra", ylab="Desviacion estandar de cada muestra",
                 main="Grafica S, Control Estadistico de la Calidad",type=type, col=col,
                 ylim=c(min(eval(LCI.S)-min(X.S)*0.05, min(X.S)*0.95),
                        max(eval(LCS.S)+max(X.S)*0.05, max(X.S)*1.05)),
                 xlim=c(-0.05*m, 1.05*m), pch = pch)
            abline(h= c(eval(LCS.S), eval(LCI.S), eval(LC.S)), col="lightgray")
            text(c(rep(1,3),rep(7,3)), rep(c(eval(LCS.S),eval(LC.S),eval(LCI.S)),2),
                 c(c("LCS = ","LC = ","LCI = "),
                   c(round(eval(LCS.S),2), round(eval(LC.S),2),
                     round(eval(LCI.S),2))), col="red")
        }
        # classify subgroups as inside/outside the control limits
        X.pos <- which(X.prom > eval(LCI.X) & X.prom < eval(LCS.X))
        x.1<-x[X.pos,]
        X.fs<- which(X.prom >= eval(LCS.X))
        X.fi<- which(X.prom <= eval(LCI.X))
        X.f<-c(X.fs,X.fi)
        R.pos <- which(X.range > eval(LCI.R) & X.range < eval(LCS.R))
        X.range.1<-X.range[R.pos]
        S.pos <- which(X.S > eval(LCI.S) & X.S < eval(LCS.S))
        # binary flags: 1 = chart shows at least one out-of-control sample
        bin.X<-if(length(X.pos)< m){
            bin.X<-1
        } else {
            bin.X<-0
        }
        bin.R<-if(length(R.pos)< m){
            bin.R<-1
        } else {
            bin.R<-0
        }
        bin.S<-if(length(S.pos)< m){
            bin.S<-1
        } else {
            bin.S<-0
        }
        hist.X()
        plot.X()
        plot.R()
        plot.S()
    }
    # assemble the result (the else-block locals are still in scope here
    # because braces do not create a new environment in R)
    structure(list("in.control" = X.pos,
                   "R.in.control" = R.pos,
                   "out.control" = X.f,
                   "Iteraciones" = 1,
                   "data.0"= x,
                   "data.1"= x.1,
                   "data.r.1" = X.range.1,
                   "bin" = c(bin.X, bin.R, bin.S),
                   "LX"= c("LCI"=eval(LCI.X), "LC"=eval(LC.X),"LCS"=eval(LCS.X)),
                   "LR"= c("LCI"=eval(LCI.R), "LC"=eval(LC.R), "LCS"=eval(LCS.R)),
                   "LS"= c("LCI"=eval(LCI.S), "LC"=eval(LC.S), "LCS"=eval(LCS.S)),
                   "Limites Grafica X" = c("LCI.X"=eval(LCI.X), "LC.X"=eval(LC.X),"LCS.X"=eval(LCS.X)),
                   "Limites Grafica R" = c("LCI.R"=eval(LCI.R), "LC.R"=eval(LC.R), "LCS.R"=eval(LCS.R)),
                   "Limites Grafica S" = c("LCI.S"=eval(LCI.S), "LC.S"=eval(LC.S), "LCS.S"=eval(LCS.S)),
                   "Conclusion del proceso"= c(if(length(X.pos)< m){
                       print("Proceso fuera de Control en Grafica X")
                   } else {
                       print("El proceso esta bajo control en Grafica X")
                   }, if(length(R.pos)< m){
                       print("Proceso fuera de control en Grafica R")
                   } else {
                       print("El proceso esta bajo control en Grafica R")
                   }, if(length(S.pos)< m){
                       print("Proceso fuera de control en Grafica S")
                   } else {
                       print("El proceso esta bajo control en Grafica S")
                   })))
}
# Demonstration: z confidence interval for the mean of an exponential
# sample, plus a coverage simulation across sample sizes.
rate <- 1/10
v <- (1 / rate)^2   # variance of Exp(rate) is 1/rate^2
mu <- 10            # true mean is 1/rate
# NOTE(review): zci() and CIsim() are not defined here -- they appear to
# come from an attached package (e.g. fastR/mosaic); confirm availability.
zci(rexp(20, rate), sd = sqrt(v))$conf.int
CIsim(n = c(2, 5, 20), samples = 10000,
      rdist = rexp, args = list(rate = rate), estimand = mu,
      method = zci, method.args = list(sd = sqrt(v)))
library(ggplot2)

this_base <- "fig04-24_mountain-height-data-by-continent"

# Highest peak per continent (height in feet).
# Fixed misspelled data values: "Austrailia" -> "Australia",
# "Kilmanjaro" -> "Kilimanjaro".
my_data <- data.frame(
  cont = c("Asia", "S.America", "N.America", "Africa",
           "Antarctica", "Europe", "Australia"),
  height = c(29029, 22838, 20322, 19341, 16050, 16024, 7310),
  mountain = c("Everest", "Aconcagua", "McKinley", "Kilimanjaro",
               "Vinson", "Blanc", "Kosciuszko"),
  stringsAsFactors = FALSE)

# line + point chart of heights by continent; group = factor(1) forces a
# single connected line across the discrete x axis
p <- ggplot(my_data, aes(x = cont, y = height, group = factor(1))) +
  geom_line() + geom_point() +
  scale_y_continuous(breaks = seq(0, 35000, 5000), limits = c(0, 35000),
                     expand = c(0, 0)) +
  labs(x = "Continent", y = "Height (feet)") +
  ggtitle("Fig 4.24 Mountain Height Data by Continent") +
  theme_bw() +
  theme(panel.grid.major.x = element_blank(),
        panel.grid.major.y = element_line(colour = "grey50"),
        plot.title = element_text(size = rel(1.2), face = "bold", vjust = 1.5),
        axis.title = element_text(face = "bold"))

p

ggsave(paste0(this_base, ".png"),
       p, width = 6, height = 4)
# knitr/vignette setup: only purl/evaluate chunks when not running on CRAN.
NOT_CRAN <- identical(tolower(Sys.getenv("NOT_CRAN")), "true")
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",  # restored: this string literal was truncated in the source
  purl = NOT_CRAN,
  eval = NOT_CRAN
)
library(here)
library(rdfp)
# authenticate with a cached OAuth token and select the DFP network
token_path <- here::here("tests", "testthat", "rdfp_token.rds")
suppressMessages(dfp_auth(token = token_path, verbose = FALSE))
options_path <- here::here("tests", "testthat", "rdfp_options.rds")
rdfp_options <- readRDS(options_path)
options(rdfp.network_code = rdfp_options$network_code)
request_data <- list(reportJob =
list(reportQuery =
list(dimensions = 'MONTH_AND_YEAR',
dimensions = 'AD_UNIT_ID',
dimensions = 'AD_UNIT_NAME',
dimensions = 'ADVERTISER_NAME',
dimensions = 'ORDER_NAME',
dimensions = 'LINE_ITEM_NAME',
adUnitView = 'FLAT',
columns = 'AD_SERVER_IMPRESSIONS',
columns = 'AD_SERVER_CLICKS',
dateRangeType = 'LAST_WEEK')
)
)
report_data <- dfp_full_report_wrapper(request_data)
report_data[,c('Dimension.MONTH_AND_YEAR', 'Dimension.AD_UNIT_ID', 'Column.AD_SERVER_CLICKS')]
request_data <- list(filterStatement=list(query="WHERE id = 936165016"))
this_result <- dfp_getSavedQueriesByStatement(request_data, as_df=FALSE)
this_report_query <- this_result$reportQuery
request_data <- list(reportJob=list(reportQuery = this_report_query))
report_data <- dfp_full_report_wrapper(request_data)
report_data[,c('Dimension.AD_UNIT_ID', 'Column.AD_SERVER_CLICKS')]
request_data <- list(reportJob=list(reportQuery=list(dimensions='MONTH_AND_YEAR',
dimensions='AD_UNIT_ID',
adUnitView='FLAT',
columns='AD_SERVER_CLICKS',
dateRangeType='LAST_WEEK'
)))
dfp_runReportJob_result <- dfp_runReportJob(request_data)
dfp_runReportJob_result$id
request_data <- list(reportJobId = dfp_runReportJob_result$id)
dfp_getReportJobStatus_result <- dfp_getReportJobStatus(request_data, as_df = FALSE)
dfp_getReportJobStatus_result
counter <- 0
while(dfp_getReportJobStatus_result != 'COMPLETED' & counter < 10){
dfp_getReportJobStatus_result <- dfp_getReportJobStatus(request_data, as_df = FALSE)
Sys.sleep(3)
counter <- counter + 1
}
request_data <- list(reportJobId=dfp_runReportJob_result$id, exportFormat='CSV_DUMP')
dfp_getReportDownloadURL_result <- dfp_getReportDownloadURL(request_data, as_df = FALSE)
report_data <- dfp_report_url_to_dataframe(report_url = dfp_getReportDownloadURL_result,
exportFormat = 'CSV_DUMP')
report_data[,c('Dimension.MONTH_AND_YEAR', 'Dimension.AD_UNIT_ID', 'Column.AD_SERVER_CLICKS')] |
# Setup for the JAK-STAT signalling example (Swameye et al. data):
# initial states, observation times, log10-scaled parameters, and the
# measured EpoR input/output series fetched from the PNAS data server.
library(deSolve)
# Initial total STAT5 amount (log10 value 0.31).
N = 10^0.31
x0 = c(N, 0.00001, 0.00001, 0.00002)
y <- c(X = x0)
times <- c( 0.0208, 0.1098, 0.2696, 0.4999, 0.8002, 1.1697, 1.6077, 2.1129, 2.6843, 3.3205, 4.0200, 4.7811, 5.6020, 6.4808, 7.4154, 8.4035, 9.4429, 10.5310, 11.6653, 12.8431, 14.0616, 15.3179, 16.6090, 17.9319, 19.2834, 20.6603, 22.0594, 23.4773, 24.9107, 26.3561, 27.8102, 29.2695, 30.7305, 32.1898, 33.6439, 35.0893, 36.5227, 37.9406, 39.3397, 40.7166, 42.0681, 43.3910, 44.6821, 45.9384, 47.1569, 48.3347, 49.4690, 50.5571, 51.5965, 52.5846, 53.5192, 54.3980, 55.2189, 55.9800, 56.6795, 57.3157, 57.8871, 58.3923, 58.8303, 59.1998, 59.5001, 59.7304, 59.8902, 59.9792)
# Parameters are stored as 10^(log10 value): rate constants k1..k4 and
# observation scalings s1, s2.
parameters = 10^c("k1"=0.31, "k2"=-1, "k3"=-0.49, "k4"= 0.42, "s1"=-0.21, "s2"=-0.34)
inputData <- read.table('http://jeti.uni-freiburg.de/PNAS_Swameye_Data/DATA1_hall_inp')
# Replace the last input value (why 0.009 specifically is not documented
# here -- TODO confirm against the original analysis).
inputData[nrow(inputData),2] = 0.009
colnames(inputData) <- c('t','u')
measure <- read.table('http://jeti.uni-freiburg.de/PNAS_Swameye_Data/DATA1_hall')
colnames(measure) <- c("t","y1","y1sd","y2","y2sd")
modelJakStat <- function(t, x, parameters, input) {
  # Right-hand side of the JAK-STAT ODE system in deSolve form.
  #
  # t          -- current time (scalar).
  # x          -- state vector (length 4).
  # parameters -- named numeric vector with elements k1, k2, k3 (k4/s1/s2
  #               may be present but are not used here).
  # input      -- list carrying an interpolation function u(t) for the
  #               time-dependent driving input.
  #
  # Returns a one-element list holding the four derivatives, as deSolve
  # expects.
  p <- as.list(parameters)
  u <- input$u(t)
  derivs <- c(
    -p$k1 * x[1] * u,
    p$k1 * x[1] * u - p$k2 * x[2]^2,
    -p$k3 * x[3] + 0.5 * p$k2 * x[2] * x[2],
    p$k3 * x[3]
  )
  list(derivs)
}
measJakStat <- function(x) {
  # Observation function: map the 4-column state trajectory to the two
  # measured quantities.
  #   y1 = s1 * (x2 + 2*x3)        (phosphorylated STAT5 in cytoplasm)
  #   y2 = s2 * (x1 + x2 + 2*x3)   (total STAT5 in cytoplasm)
  # s1, s2 are the fitted log10 scaling parameters, hard-coded here.
  scale1 <- 10^(-0.21)
  scale2 <- 10^(-0.34)
  phospho <- x[, 2] + 2 * x[, 3]
  total <- x[, 1] + x[, 2] + 2 * x[, 3]
  cbind(y1 = scale1 * phospho, y2 = scale2 * total)
}
# Assemble measurement/sd frames and run the dynamic elastic net (DEN)
# estimation with a conservation-law constraint, then inspect the results.
# NOTE(review): odeModel(), DEN(), nominalSol(), plotAnno() etc. come from
# the package this vignette documents (seeds, presumably) -- not visible here.
y <- data.frame(measure[,1], measure[,2], measure[,4])
sd <- data.frame(measure[,1], measure[,3], measure[,5])
# Mass conservation: total STAT5 over all compartments equals N.
JakStatConst <- '2*x4+ 2*x3 + x1 + x2 == N'
JakStatModel <- odeModel(func = modelJakStat, parms = parameters, input = inputData, times = times,
                         measFunc = measJakStat, y = x0, meas = y, sd = sd)
plot(nominalSol(JakStatModel))
results <- DEN(odeModel = JakStatModel, alphaStep = 0.01, alpha2 = 0.4, epsilon = 0.2,cString = JakStatConst, plotEstimates = TRUE, conjGrad = FALSE)
statesAnno <- c("STAT5 cyt.", "STAT5p cyt.", "STAT5p-d cyt.", "stat5-d nucl")
measurAnno <- c("total STAT5p", "total STAT5")
# Inspect the second seed's estimates: annotated plots, summary, hidden
# inputs, state estimates, output estimates and confidence bands.
plotAnno(results[[2]], stateAnno = statesAnno, measAnno = measurAnno)
summary(results[[2]])
hiddenInputs(resultsSeeds = results, ind = 2)
estiStates(resultsSeeds = results, ind = 2)
outputEstimates(resultsSeeds = results, ind = 2)
confidenceBands(resultsSeeds = results, slot = "states", ind = 2)
gevrRlPlot <- function(z, conf = 0.95, method = c("delta", "profile")) {
  # Return level plot for a stationary GEV(r) fit: estimated return levels
  # with confidence bounds over a grid of exceedance probabilities, plus the
  # observed block maxima plotted at their empirical return periods.
  #
  # z      -- fitted model object (needs $stationary, $data).
  # conf   -- confidence level passed to gevrRl().
  # method -- CI method: delta approximation or profile likelihood.
  if(!z$stationary)
    stop("Model must be stationary")
  method <- match.arg(method)
  # Exceedance probability grid, denser near the tails.
  p <- c(seq(0.001, 0.01, by = 0.005), seq(0.01, 0.09, by = 0.01), 0.1, 0.2, 0.3, 0.4, 0.5,
         0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.995, 0.999)
  # Column 1: point estimate; columns 2-3: lower/upper CI.
  levels <- matrix(0, length(p), 3)
  for(i in 1:nrow(levels)) {
    y <- gevrRl(z, 1/p[i], conf = conf, method = method)
    levels[i, 1] <- y$Estimate
    levels[i, 2:3] <- y$CI
  }
  # Empirical return periods -1/log(i/(n+1)) on a log-scaled x axis.
  plot(-1/log((1:length(z$data[,1]))/(length(z$data[,1]) + 1)), sort(z$data[,1]), log = "x", type = "n",
       xlab = "Return Period", ylab = "Return Level", xlim = c(0.1, 1000), ylim = c(min(z$data[, 1], levels[, 1]), max(z$data[, 1], levels[, 1])))
  title("Return Level Plot")
  lines(-1/log(1-p), levels[, 1])
  lines(-1/log(1-p), levels[, 2], col = 4)
  lines(-1/log(1-p), levels[, 3], col = 4)
  points(-1/log((1:length(z$data[,1]))/(length(z$data[,1]) + 1)), sort(z$data[,1]))
}
pgevMarg1 <- function(z, j, locvec, scalevec, shapevec) {
  # Marginal distribution function of the j-th largest order statistic,
  # evaluated at each observed value in column j of z$data with the
  # observation-specific GEV parameters.
  # nzsh() is the package's shape-safe transform -- presumably
  # (1 + shape*w)^(-1/shape); confirm against its definition elsewhere.
  vapply(seq_len(z$n), function(obs) {
    w <- nzsh((z$data[obs, j] - locvec[obs]) / scalevec[obs], shapevec[obs])
    ks <- 0:(j - 1)
    # exp(-w) * sum_{k=0}^{j-1} w^k / k!
    sum(w^ks / gamma(ks + 1)) * exp(-w)
  }, numeric(1))
}
pgevMarg2 <- function(x, z, j, i, locvec, scalevec, shapevec) {
  # Same marginal CDF as pgevMarg1, but for a single observation index i and
  # an arbitrary evaluation point x (used by gevrQQ's root finder).
  w <- nzsh((x - locvec[i]) / scalevec[i], shapevec[i])
  total <- 0
  for (k in seq_len(j) - 1) {
    total <- total + w^k / gamma(k + 1)
  }
  total * exp(-w)
}
gevrQQ <- function(z, j, locvec, scalevec, shapevec) {
  # Quantile-quantile plot for the j-th order statistic margin.  Model
  # quantiles are obtained by numerically inverting the marginal CDF with
  # uniroot() at each plotting position.
  qgevMarg <- function(x, q, i) {
    # Root function: zero where the marginal CDF at x equals q.
    q - pgevMarg2(x, z, j, i, locvec, scalevec, shapevec)
  }
  emp <- rep(0, z$n)
  # Plotting positions i/(n+1), re-ordered to follow the ranks of the
  # standardized (transformed) data.
  Series <- seq(1, z$n, 1) / (z$n + 1)
  ztrans <- nzsh(-(z$data[, j] - locvec) / scalevec, - shapevec)
  Series <- Series[order(Series)[rank(ztrans)]]
  for(i in 1:z$n) {
    # Search interval pads the observed range by 2 on each side -- assumes
    # the model quantile falls inside it (TODO confirm for heavy tails).
    emp[i] <- uniroot(qgevMarg, interval = c(min(z$data[, j]) - 2, max(z$data[, j]) + 2), q = Series[i], i = i)$root
  }
  plot(z$data[, j], emp, xlab = "Empirical", ylab = "Model",
       xlim = c(min(z$data[, j], emp), max(z$data[, j], emp)), ylim = c(min(z$data[, j], emp), max(z$data[, j], emp)))
  if(z$stationary)
    title(paste("Quantile Plot, j=", j, sep = ""))
  if(!z$stationary)
    title(paste("Residual Quantile Plot, j=", j, sep = ""))
  abline(0, 1, col = 4)
}
gevrPP <- function(z, j, locvec, scalevec, shapevec) {
  # Probability-probability plot for the j-th order statistic margin:
  # sorted model CDF values against uniform plotting positions i/(n+1).
  #
  # z                         -- fitted GEV(r) object ($n, $stationary).
  # j                         -- which order statistic margin to plot.
  # locvec/scalevec/shapevec  -- per-observation GEV parameters.
  #
  # Fix: removed the unused local `n <- z$n` (assigned but never read).
  Series <- seq(1, z$n, 1) / (z$n + 1)
  p <- pgevMarg1(z, j, locvec, scalevec, shapevec)
  p <- sort(p)
  plot(p, Series, xlab = "Empirical", ylab = "Model", xlim = c(0,1), ylim = c(0,1))
  if(z$stationary)
    title(paste("Probability Plot, ", "j=", j, sep = ""))
  if(!z$stationary)
    title(paste("Residual Probability Plot, ", "j=", j, sep = ""))
  abline(0, 1, col = 4)
}
dgevMarg <- function(x, j, loc, scale, shape) {
  # Density of the j-th largest order statistic of a GEV(r) model at x.
  #
  # Fix: the original declared self-referential defaults
  # (loc = loc, scale = scale, shape = shape), which trigger a
  # "promise already under evaluation" error whenever an argument is
  # omitted -- the arguments were therefore effectively required.  The
  # unusable defaults are dropped; every existing (working) call site
  # passes all three, so this is backward compatible.
  if(length(shape) == 1)
    shape <- rep(shape, max(length(x), length(loc), length(scale)))
  w <- (x - loc) / scale
  # Gumbel case (shape == 0) uses the closed form; otherwise the
  # shape-safe transform nzsh().  Note ifelse() evaluates both branch
  # expressions for the full vector and selects elementwise.
  ifelse(shape == 0, exp(-exp(-w) - j*w) / (scale * factorial(j-1)),
         (nzsh(w, shape)^j / (scale * gamma(j))) * exp(-nzsh(w, shape)))
}
gevrHist <- function(z, j) {
  # Histogram of the j-th order statistic with the fitted marginal density
  # overlaid.  Only defined for stationary fits (single parameter set).
  if(!z$stationary)
    stop("Model must be stationary")
  # Compute breaks first (plot = FALSE) so the density curve can share them.
  h <- hist(z$data[, j], plot = FALSE)
  x <- seq(min(h$breaks), max(h$breaks), (max(h$breaks) - min(h$breaks))/1000)
  # Gumbel fits carry no shape parameter; use 0.
  if(!z$gumbel) shape <- z$par.ests[3] else shape <- 0
  if(j == 1)
    y <- dgevr(x, loc = z$par.ests[1], scale = z$par.ests[2], shape = shape)
  if(j > 1)
    y <- dgevMarg(x, j, loc = z$par.ests[1], scale = z$par.ests[2], shape = shape)
  hist(z$data[, j], freq = FALSE, ylim = c(0, max(max(h$density), max(y))),
       xlab = "x", ylab = "Density", main = paste("Density Plot, j=", j, sep = ""))
  # Rug of the observations along the x axis.
  points(z$data[, j], rep(0, length(z$data[, j])))
  lines(x, y, col = 4)
}
gevrResid <- function(z, locvec, scalevec, shapevec) {
  # Residual scatterplots for a nonstationary GEV(r) fit: transformed
  # residuals against every non-intercept covariate of the location, scale
  # and shape parameters, each with a lowess smoother in red.  Behaviour is
  # identical to the original three copy-pasted loops, folded into one.
  if(z$stationary)
    stop("Model cannot be stationary")
  resid <- nzsh((z$data[, 1] - locvec) / scalevec, shapevec)
  parLabel <- c("Location", "Scale", "Shape")
  for (comp in 1:3) {
    if (z$parnum[comp] > 1) {
      # Column 1 is the intercept; plot covariates 2..parnum.
      for (i in 2:z$parnum[comp]) {
        covariate <- z$covars[[comp]][, i]
        plot(covariate, resid,
             xlab = paste(parLabel[comp], colnames(z$covars[[comp]])[i], sep = " "),
             ylab = "Residuals")
        lines(lowess(covariate, resid), col = "red")
      }
    }
  }
}
gevrDiag <- function(z, conf = 0.95, method = c("delta", "profile")) {
  # Interactive diagnostics menu for a GEV(r) fit: return level plot,
  # marginal density/PP/QQ plots, and residual scatterplots.  Restores the
  # user's graphics parameters on exit.
  oldpar <- par(no.readonly = TRUE)
  on.exit(par(oldpar))
  method <- match.arg(method)
  par(ask = TRUE, mfcol = c(2, 2))
  # Per-observation parameter vectors: link(design %*% coefficients) for
  # each of location, scale and (unless Gumbel) shape.
  locvec <- z$links[[1]](rowSums(t(z$par.ests[1:z$parnum[1]] * t(z$covars[[1]]))))
  scalevec <- z$links[[2]](rowSums(t(z$par.ests[(z$parnum[1] + 1):(z$parnum[1] + z$parnum[2])] * t(z$covars[[2]]))))
  if(!z$gumbel) {
    shapevec <- z$links[[3]](rowSums(t(z$par.ests[(z$parnum[1] + z$parnum[2] + 1):(z$parnum[1] + z$parnum[2] + z$parnum[3])] * t(z$covars[[3]]))))
  } else {
    shapevec <- rep(0, z$n)
  }
  # Loop the menu until the user selects 0 (menu() then returns 0).
  choice <- 1
  while(choice > 0) {
    choice <- menu(c("Return Level Plot", "Marginal Density Plot(s)", "Marginal PP Plot(s)",
                     "Marginal QQ Plot(s)", "Residual Scatterplot(s)"), title = "\nMake a plot selection (or 0 to exit):")
    # switch on choice+1: entry 1 handles exit, entries 2..6 the plots.
    # Plots are wrapped in try() so one failure doesn't end the session.
    switch(choice + 1,
           cat("Exited\n"),
           if(!z$stationary) stop("Model must be stationary") else try(gevrRlPlot(z, conf, method), silent = TRUE),
           if(!z$stationary) stop("Model must be stationary") else for(i in 1:z$R) try(gevrHist(z, i), silent = TRUE),
           for(i in 1:z$R) try(gevrPP(z, i, locvec, scalevec, shapevec), silent = TRUE),
           for(i in 1:z$R) try(gevrQQ(z, i, locvec, scalevec, shapevec), silent = TRUE),
           if(z$stationary) stop("Model cannot be stationary") else try(gevrResid(z, locvec, scalevec, shapevec), silent = TRUE)
    )
  }
  par(mfrow = c(1, 1))
}
cesDerivCoef <- function( par, xNames, tName = NULL, data, vrs, nested = FALSE,
   returnRho1 = TRUE, returnRho2 = TRUE, returnRho = TRUE, rhoApprox ) {
   # Analytic gradients of the CES function with respect to its coefficients,
   # one row per observation in 'data', one column per returned coefficient.
   # Because the exact derivative expressions are numerically unstable when a
   # substitution parameter (rho, rho_1, rho_2) is close to zero, Taylor
   # series approximations are used whenever |rho| is below the corresponding
   # threshold in 'rhoApprox'.
   #
   # par        -- coefficient vector (order fixed by cesCoefNames()).
   # xNames     -- names of the 2 (non-nested), 3 or 4 (nested) inputs.
   # tName      -- optional name of the time variable (technological change).
   # vrs        -- TRUE for variable returns to scale (adds 'nu').
   # nested     -- non-nested (2-input) vs nested (3/4-input) CES.
   # returnRho* -- which rho derivatives to compute/return.
   # rhoApprox  -- named thresholds for switching to the approximations.
   nExog <- length( xNames )
   coefNames <- cesCoefNames( nExog = nExog, vrs = vrs,
      returnRho1 = returnRho1, returnRho2 = returnRho2,
      returnRho = returnRho, nested = nested, withTime = !is.null( tName ) )
   if( !nested ) {
      rhoApprox <- cesCheckRhoApprox( rhoApprox = rhoApprox, withY = NA,
         withDeriv = TRUE )
   }
   # Result matrix: observations x coefficients.
   result <- matrix( NA, nrow = nrow( data ), ncol = length( coefNames ) )
   colnames( result ) <- coefNames
   # Name 'par' with the FULL coefficient set so elements can be addressed
   # by name regardless of which rho derivatives are requested.
   names( par ) <- cesCoefNames( nExog = nExog, vrs = vrs, returnRho = TRUE,
      returnRho1 = TRUE, returnRho2 = TRUE, nested = nested,
      withTime = !is.null( tName ) )
   if( !nested ) {
      # ----- non-nested CES: closed-form derivatives, two inputs only -----
      if( nExog != 2 ) {
         stop( "the derivatives of the non-nested CES can be calculated",
            " only for two inputs" )
      }
      gamma <- par[ "gamma" ]
      if( !is.null( tName ) ) {
         # Hicks-neutral technological change scales gamma by exp(lambda*t).
         gamma <- gamma * exp( par[ "lambda" ] * data[[ tName ]] )
      }
      delta <- par[ "delta" ]
      rho <- par[ "rho" ]
      if( vrs ) {
         nu <- par[ "nu" ]
      } else {
         nu <- 1
      }
      # d/d gamma: exact CES kernel, or its rho->0 approximation.
      if( abs( rho ) > rhoApprox[ "gamma" ] ) {
         result[ , "gamma" ] <-
            ( delta * data[[ xNames[ 1 ] ]]^(-rho) + ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) )^( -nu / rho )
      } else {
         result[ , "gamma" ] <-
            data[[ xNames[ 1 ] ]]^( nu * delta ) *
            data[[ xNames[ 2 ] ]]^( nu * ( 1 - delta ) ) *
            exp( - 0.5 * rho * nu * delta * ( 1 - delta ) *
               ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) )^2 )
      }
      if( !is.null( tName ) ) {
         result[ , "gamma" ] <- result[ , "gamma" ] *
            exp( par[ "lambda" ] * data[[ tName ]] )
      }
      # d/d lambda reuses the gamma column (already includes exp(lambda*t)).
      if( !is.null( tName ) ) {
         result[ , "lambda" ] <- result[ , "gamma" ] *
            par[ "gamma" ] * data[[ tName ]]
      }
      # d/d delta: exact or second-order approximation around rho = 0.
      if( abs( rho ) > rhoApprox[ "delta" ] ) {
         result[ , "delta" ] <- - ( gamma * nu / rho ) *
            ( data[[ xNames[ 1 ] ]]^(-rho) - data[[ xNames[ 2 ] ]]^(-rho) ) *
            ( delta * data[[ xNames[ 1 ] ]]^(-rho) +
               ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) )^( - nu / rho - 1 )
      } else {
         result[ , "delta" ] <- gamma * nu *
            ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) ) *
            data[[ xNames[ 1 ] ]]^( nu * delta ) *
            data[[ xNames[ 2 ] ]]^( nu * ( 1 - delta ) ) *
            ( 1 - ( rho / 2 ) * ( 1 - 2 * delta + nu * delta * ( 1 - delta ) *
               ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) ) ) *
               ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) ) )
      }
      # d/d rho: exact, or Taylor expansion in rho.
      if( returnRho ) {
         if( abs( rho ) > rhoApprox[ "rho" ] ) {
            result[ , "rho" ] <- ( gamma * nu / rho^2 ) *
               log( delta * data[[ xNames[ 1 ] ]]^(-rho) +
                  ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) ) *
               ( delta * data[[ xNames[ 1 ] ]]^(-rho) +
                  ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) )^( -nu / rho ) +
               ( gamma * nu / rho ) *
               ( delta * log( data[[ xNames[ 1 ] ]] ) * data[[ xNames[ 1 ] ]]^(-rho) +
                  ( 1 - delta ) * log( data[[ xNames[ 2 ] ]] ) * data[[ xNames[ 2 ] ]]^(-rho) ) *
               ( delta * data[[ xNames[ 1 ] ]]^(-rho) +
                  ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) )^( -nu / rho - 1 )
         } else {
            result[ , "rho" ] <- gamma * nu * delta * ( 1 - delta ) *
               data[[ xNames[ 1 ] ]]^( nu * delta ) *
               data[[ xNames[ 2 ] ]]^( nu * ( 1 - delta ) ) *
               ( - ( 1 / 2 ) *
                  ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) )^2
                  + ( 1 / 3 ) * rho * ( 1 - 2 * delta ) *
                  ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) )^3
                  + ( 1 / 4 ) * rho * nu * delta * ( 1 - delta ) *
                  ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) )^4 )
         }
      }
      # d/d nu (only under variable returns to scale).
      if( vrs ) {
         if( abs( rho ) > rhoApprox[ "nu" ] ) {
            result[ , "nu" ] <- - ( gamma / rho ) *
               log( delta * data[[ xNames[ 1 ] ]]^(-rho) +
                  ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) ) *
               ( delta * data[[ xNames[ 1 ] ]]^(-rho) +
                  ( 1 - delta ) * data[[ xNames[ 2 ] ]]^(-rho) )^( -nu / rho )
         } else {
            result[ , "nu" ] <- gamma *
               data[[ xNames[ 1 ] ]]^( nu * delta ) *
               data[[ xNames[ 2 ] ]]^( nu * ( 1 - delta ) ) *
               ( delta * log( data[[ xNames[ 1 ] ]] ) +
                  ( 1 - delta ) * log( data[[ xNames[ 2 ] ]] ) -
                  ( rho * delta * ( 1 - delta ) / 2 ) *
                  ( log( data[[ xNames[ 1 ] ]] ) - log( data[[ xNames[ 2 ] ]] ) )^2 *
                  ( 1 + nu * ( delta * log( data[[ xNames[ 1 ] ]] ) +
                     ( 1 - delta ) * log( data[[ xNames[ 2 ] ]] ) ) ) )
         }
      }
   } else if( nExog == 3 ) {
      # ----- nested CES with 3 inputs: delegate to cesInterN3() helpers ----
      if( !vrs ) {
         par <- c( par, nu = 1 )
      }
      result[ , "gamma" ] <- cesInterN3(
         funcName = "cesDerivCoefN3Gamma", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "gamma" ] )
      if( !is.null( tName ) ) {
         result[ , "lambda" ] <- cesInterN3(
            funcName = "cesDerivCoefN3Lambda", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "gamma" ] )
      }
      result[ , "delta_1" ] <- cesInterN3(
         funcName = "cesDerivCoefN3Delta1", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "delta" ] )
      result[ , "delta" ] <- cesInterN3(
         funcName = "cesDerivCoefN3Delta", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "delta" ] )
      if( returnRho1 ) {
         result[ , "rho_1" ] <- cesInterN3(
            funcName = "cesDerivCoefN3Rho1", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "rho" ] )
      }
      if( returnRho ) {
         result[ , "rho" ] <- cesInterN3(
            funcName = "cesDerivCoefN3Rho", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "rho" ] )
      }
      if( vrs ) {
         result[ , "nu" ] <- cesInterN3(
            funcName = "cesDerivCoefN3Nu", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "nu" ] )
      }
   } else if( nExog == 4 ) {
      # ----- nested CES with 4 inputs: delegate to cesInterN4() helpers ----
      if( !vrs ) {
         par <- c( par, nu = 1 )
      }
      result[ , "gamma" ] <- cesInterN4(
         funcName = "cesDerivCoefN4Gamma", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "gamma" ] )
      if( !is.null( tName ) ) {
         result[ , "lambda" ] <- cesInterN4(
            funcName = "cesDerivCoefN4Lambda", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "gamma" ] )
      }
      result[ , "delta_1" ] <- cesInterN4(
         funcName = "cesDerivCoefN4Delta1", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "delta" ] )
      result[ , "delta_2" ] <- cesInterN4(
         funcName = "cesDerivCoefN4Delta2", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "delta" ] )
      result[ , "delta" ] <- cesInterN4(
         funcName = "cesDerivCoefN4Delta", par = par,
         xNames = xNames, tName = tName, data = data,
         rhoApprox = rhoApprox[ "delta" ] )
      if( returnRho1 ) {
         result[ , "rho_1" ] <- cesInterN4(
            funcName = "cesDerivCoefN4Rho1", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "rho" ] )
      }
      if( returnRho2 ) {
         result[ , "rho_2" ] <- cesInterN4(
            funcName = "cesDerivCoefN4Rho2", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "rho" ] )
      }
      if( returnRho ) {
         result[ , "rho" ] <- cesInterN4(
            funcName = "cesDerivCoefN4Rho", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "rho" ] )
      }
      if( vrs ) {
         result[ , "nu" ] <- cesInterN4(
            funcName = "cesDerivCoefN4Nu", par = par,
            xNames = xNames, tName = tName, data = data,
            rhoApprox = rhoApprox[ "nu" ] )
      }
   } else {
      stop( "the derivatives of the nested CES can be calculated",
         " only for three and four inputs" )
   }
   return( result )
}
tdmReadDataset <- function(opts) {
  # Read the train/validation data via the user-supplied reader
  # opts$READ.TrnFn, optionally read a test set via opts$READ.TstFn, tag
  # the rows with a 0/1 train/test indicator column (opts$TST.COL) and
  # return the combined data frame.
  if (is.null(opts$READ.TrnFn))
    stop("opts$READ.TrnFn is missing. Need a function(opts) for reading the train-validation data!")
  if (is.null(opts$TST.COL)) opts$TST.COL = "TST.COL";
  # NOTE(review): if opts$READ.TXT is FALSE, neither dset nor tset is ever
  # assigned, and the code below fails with "object 'dset' not found" --
  # confirm whether READ.TXT == FALSE is a supported configuration.
  if (opts$READ.TXT) {
    cat1(opts,opts$filename,": Read data from",opts$filename,"...\n")
    dset <- opts$READ.TrnFn(opts);
    if (!is.null(opts$READ.TstFn)) {
      cat1(opts,opts$filename,": Read test data from",opts$filetest, "...\n");
      tset <- opts$READ.TstFn(opts);
    }
  }
  if (!is.null(opts$READ.TstFn)) {
    # NOTE(review): this check can never trigger -- TST.COL was defaulted
    # above when NULL.
    if (is.null(opts$TST.COL)) stop("Need a non-NULL definition for opts$TST.COL!");
    # Mark train rows with 0, test rows with 1, then stack them.
    dset <- tdmBindResponse(dset,opts$TST.COL,rep(0,nrow(dset)))
    tset <- tdmBindResponse(tset,opts$TST.COL,rep(1,nrow(tset)))
    dset <- rbind(dset,tset)
  }
  cat1(opts,opts$filename,":", length(dset[,1]), "records read.\n")
  dset;
}
tdmReadTrain <- function(opts) {
  # Read the train/validation CSV located at <path>/<dir.txt>/<filename>.
  #
  # opts$READ.NROW limits the number of data rows read (-1 reads all).
  #
  # Changes: file.path() instead of paste(sep = "/") (equivalent -- fsep
  # defaults to "/"), and the read.csv argument spelled out as 'nrows'
  # (the original 'nrow' only worked via partial argument matching).
  # The result is returned invisibly, matching the original's behaviour
  # (its last expression was an assignment, which returns invisibly).
  trainFile <- file.path(opts$path, opts$dir.txt, opts$filename)
  invisible(read.csv(file = trainFile, nrows = opts$READ.NROW))
}
tdmReadTest <- function(opts) {
  # Read the test-set CSV located at <path>/<dir.txt>/<filetest>.
  #
  # Bug fix: the original built the path with
  #     paste0(opts$path, opts$dir.txt, opts$filetest, sep = "/")
  # but paste0() has no 'sep' argument, so "/" was swept into '...' and
  # pasted onto the END of the concatenated string ("pathtxtfile.csv/"),
  # producing a path that never exists.  tdmReadTrain() correctly separates
  # the components with "/"; this now mirrors it via file.path().
  # 'nrows' is spelled out (no partial matching); result returned invisibly
  # like tdmReadTrain().
  testFile <- file.path(opts$path, opts$dir.txt, opts$filetest)
  invisible(read.csv(file = testFile, nrows = opts$READ.NROW))
}
check.misleading.factor <- function(null, resp.var){
  # Emit a message() warning about factor/character covariates in the
  # null-model design matrix whose number of distinct values exceeds 10% of
  # the row count -- these are usually ID-like columns mistakenly treated as
  # factors.  Always returns NULL.
  #
  # null     -- data frame of the null model (response + covariates).
  # resp.var -- name of the response column, excluded from the check.
  #
  # Fixes: inherits() instead of `class(x) %in% ...` inside if() -- class()
  # can return a vector of length > 1 (e.g. ordered factors), which makes
  # the if() condition an error in R >= 4.2; index loops replaced by
  # vectorized vapply() over columns.
  covar <- null[, colnames(null) != resp.var, drop = FALSE]
  if(ncol(covar) == 0){
    return(NULL)
  }
  # Indices of factor/character columns.
  id <- which(vapply(covar, inherits, logical(1),
                     what = c("factor", "character")))
  if(length(id) == 0){
    return(NULL)
  }
  # Among those, flag columns with > 10% distinct values per row.
  mis.id <- id[vapply(id, function(i) {
    length(unique(covar[, i])) / nrow(covar) > .1
  }, logical(1))]
  if(length(mis.id) == 0){
    return(NULL)
  }
  mis.factor <- colnames(covar)[mis.id]
  # Strip a leading "factor." prefix added by model-matrix naming.
  mis.factor <- gsub("^factor.", "", mis.factor)
  # Truncate long lists to the first 10 names plus a count of the rest.
  if(length(mis.factor) > 10){
    n <- length(mis.factor) - 10
    mis.factor <- sort(mis.factor)
    mis.factor <- head(mis.factor, 11)
    mis.factor[11] <- paste("and", n, "others")
  }
  msg <- paste0("Are those covariates or factor below really factors? They have too many levels: \n",
                paste(mis.factor, collapse = " "), "\n")
  message(msg)
  return(NULL)
}
current_cols <- function() {
  # Column specification (current file layout) for reading the NDR export
  # with vroom::vroom(col_types = current_cols()).  cols_only() keeps only
  # the listed columns; specs are positional, so the order below must match
  # the file's column order exactly.  Dates use the "01-Jan-2020" style
  # (%d-%b-%Y) except the ninth column, which uses vroom's default
  # ISO parsing.
  vroom::cols_only(
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_character(),
    vroom::col_character(),
    vroom::col_date(),
    vroom::col_double(),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_factor(),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_logical(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_logical(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_logical(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_character()
  )
}
old_cols <- function() {
  # Column specification for the LEGACY NDR export layout (two-digit years,
  # %d-%b-%y), companion to current_cols().  Specs are positional.
  #
  # Fix: the original had a trailing comma after the last col_date() entry,
  # which R parses as an extra empty argument and makes the cols_only()
  # call fail ("argument ... is empty") when evaluated.
  vroom::cols_only(
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_character(),
    vroom::col_character(),
    vroom::col_date(),
    vroom::col_double(),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_double(),
    vroom::col_factor(),
    vroom::col_double(),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_logical(),
    vroom::col_date(format = "%d-%b-%y"),
    vroom::col_logical(),
    vroom::col_date(format = "%d-%b-%y")
  )
}
hts_cols <- function() {
  # Column specification for the HTS (HIV testing services) export.  Uses
  # vroom::cols() (all columns read, not cols_only()); specs are positional.
  # Two date styles appear: "%d-%b-%Y" and a full timestamp
  # "%m/%d/%Y %I:%M:%S %p" (12-hour clock with AM/PM).
  vroom::cols(
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_character(),
    vroom::col_character(),
    vroom::col_character(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_integer(),
    vroom::col_integer(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_date(format = "%m/%d/%Y %I:%M:%S %p"),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_integer(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_date(format = "%m/%d/%Y %I:%M:%S %p"),
    vroom::col_factor(),
    vroom::col_date(format = "%m/%d/%Y %I:%M:%S %p"),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_character(),
    vroom::col_character(),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_character()
  )
}
recency_cols <- function() {
  # Column specification for the recency-testing export.  Positional specs;
  # mixes "%d-%b-%Y" and "%d-%m-%Y" date styles.
  vroom::cols(
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_character(),
    vroom::col_factor(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_double(),
    vroom::col_factor(),
    vroom::col_character(),
    vroom::col_date(format = "%d-%b-%Y"),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_logical(),
    vroom::col_factor(),
    vroom::col_factor(),
    # NOTE(review): "%e" (space-padded day) where sibling columns use "%d" --
    # possibly intentional for this field's formatting, TODO confirm against
    # a sample file.
    vroom::col_date(format = "%e-%m-%Y"),
    vroom::col_character(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_factor(),
    vroom::col_double(),
    vroom::col_date(format = "%d-%m-%Y"),
    vroom::col_date(format = "%d-%m-%Y"),
    vroom::col_date(format = "%d-%m-%Y"),
    vroom::col_character(),
    vroom::col_character()
  )
}
ndr_types <- function() {
  # The NDR dataset flavours this package knows how to read; each has a
  # matching *_cols() specification above.
  types <- c("treatment", "hts", "recency")
  types
}
threshPred_UI <- function(id) {
  # Shiny module UI: a single dropdown for choosing a thresholding rule to
  # binarize the continuous prediction surface.  'id' namespaces the input
  # so multiple instances of the module can coexist.
  ns <- NS(id)
  tagList(
    tags$div(title='Create binary map of predicted presence/absence assuming all values above threshold value represent presence. Also can be interpreted as a "potential distribution" (see guidance).',
             selectInput(ns('predThresh'), label = "Set threshold",
                         choices = list("No threshold" = 'noThresh',
                                        "Minimum Training Presence" = 'mtp',
                                        "10 Percentile Training Presence" = 'p10')))
  )
}
threshPred_MOD <- function(input, output, session, pred) {
  # Shiny module server: optionally threshold the prediction raster 'pred'.
  # Returns a reactive list(thresh = <choice>, pred = <possibly binarized
  # raster>).
  # NOTE(review): 'rvs' is not an argument -- it is resolved from the
  # enclosing/global environment (app-wide reactiveValues, presumably);
  # confirm it is defined wherever this module is used.
  reactive({
    if (input$predThresh != 'noThresh') {
      occs.xy <- rvs$occs[c('longitude', 'latitude')]
      # Pick the prediction stack matching the selected output scale.
      if (rvs$comp7.type == 'logistic') {
        predCur <- rvs$modPredsLog[[rvs$modSel]]
      } else if (rvs$comp7.type == 'cloglog') {
        predCur <- rvs$modPredsCLL[[rvs$modSel]]
      } else {
        predCur <- rvs$modPreds[[rvs$modSel]]
      }
      # Threshold is derived from predicted values at the occurrence points.
      occPredVals <- raster::extract(predCur, occs.xy)
      x <- thresh(occPredVals, input$predThresh)
      # Binarize: cells above the threshold become presence (TRUE/1).
      pred <- pred > x
      names(pred) <- paste0(rvs$modSel, '_thresh_', input$predThresh)
      rvs %>% writeLog(input$predThresh, 'threshold selected: value =',
                       round(x, digits = 3), '.')
    }
    return(list(thresh=input$predThresh, pred=pred))
  })
}
# testthat suite for the expect_prop_* family (proportion-based data
# expectations from the testdat package, presumably).
test_that("prop_gte", {
  df1 <- data.frame(
    x = 1:100,
    y = c(1:25, NA, 27:50, NA, 52:100)
  )
  chk_even <- function(x) (as.numeric(x) %% 2) == 0
  # Exactly half of 1:100 are even; removing two values from y drops it
  # below the 0.5 threshold.
  expect_success(expect_prop_gte(x, chk_even, prop = 0.5, data = df1))
  expect_failure(expect_prop_gte(y, chk_even, prop = 0.5, data = df1))
  # The {{ }} embrace must forward an unquoted column through a wrapper.
  expect_prop_even <- function(var, prop, data) {
    expect_prop_gte({{var}}, func = chk_even, prop = prop, data = data)
  }
  expect_success(expect_prop_even(x, prop = 0.5, data = df1))
  expect_failure(expect_prop_even(y, prop = 0.5, data = df1))
})
test_that("proportion missing", {
  df1 <- data.frame(
    names = c("Kinto", "Al", "error", "Paddy"),
    answers = c(1, "", 0, NA)
  )
  # With the default missing definition only NA counts; adding the
  # text-missing option makes "" count as missing too.
  expect_success(expect_prop_nmiss(names, prop = 0.9, data = df1))
  expect_failure(expect_prop_nmiss(names, prop = 0.9, miss = getOption("testdat.miss_text"), data = df1))
  expect_failure(expect_prop_nmiss(answers, prop = 3 / 4, data = df1))
})
test_that("proportion valid values", {
  # Randomized check: the expectation should succeed exactly when the
  # observed proportion of 0/1 values meets the 0.6 cutoff.
  for (i in 1:10) {
    df1 <- data.frame(
      key = 1:100,
      binary2 = sample(0:2, 100, TRUE)
    )
    if (sum(df1$binary2 %in% 0:1) / nrow(df1) >= 0.6) {
      expect_success(expect_prop_values(binary2, prop = 0.6, 0:1, data = df1))
    } else {
      expect_failure(expect_prop_values(binary2, prop = 0.6, 0:1, data = df1))
    }
  }
})
# vcr test setup: point the cassette directory at ../fixtures and verify
# that every test cassette name is unique/valid.
library("vcr")
invisible(vcr::vcr_configure(
  dir = "../fixtures"
))
vcr::check_cassette_names()
context("Fetch distribution data for a survey")
# Replays a recorded API response (vcr cassette) and checks the parsed
# tibble's column classes/types -- a schema regression test for
# fetch_distributions().
test_that("fetch_distributions returns a tbl_df with expected column names and types", {
  vcr::use_cassette("fetch_distributions", {
    x <- fetch_distributions("SV_0CBoEpG6UVXH4SV")
  })
  expect_s3_class(x, c("tbl_df","tbl","data.frame"))
  # Date-time columns.
  expect_s3_class(x$sendDate, c("POSIXct", "POSIXt"))
  expect_s3_class(x$createdDate, c("POSIXct", "POSIXt"))
  expect_s3_class(x$modifiedDate, c("POSIXct", "POSIXt"))
  # Identifier and metadata columns are plain character.
  expect_type(x$id, "character")
  expect_type(x$parentDistributionId, "character")
  expect_type(x$ownerId, "character")
  expect_type(x$organizationId, "character")
  expect_type(x$requestStatus, "character")
  expect_type(x$requestType, "character")
  expect_type(x$customHeaders, "character")
  expect_type(x$headers_fromEmail, "character")
  expect_type(x$headers_replyToEmail, "character")
  expect_type(x$headers_fromName, "character")
  expect_type(x$subjectMessage_messageId, "character")
  expect_type(x$subjectMessage_libraryId, "character")
  expect_type(x$recipients_mailingListId, "character")
  expect_type(x$recipients_contactId, "character")
  expect_type(x$recipients_libraryId, "character")
  expect_type(x$recipients_sampleId, "character")
  expect_type(x$message_libraryId, "character")
  expect_type(x$message_messageId, "character")
  expect_type(x$message_messageText, "character")
  expect_type(x$surveyLink_surveyId, "character")
  expect_type(x$surveyLink_expirationDate, "character")
  expect_type(x$surveyLink_linkType, "character")
  expect_type(x$embeddedData, "character")
  # Delivery statistics are integer counts.
  expect_type(x$stats_sent, "integer")
  expect_type(x$stats_failed, "integer")
  expect_type(x$stats_started, "integer")
  expect_type(x$stats_bounced, "integer")
  expect_type(x$stats_opened, "integer")
  expect_type(x$stats_skipped, "integer")
  expect_type(x$stats_finished, "integer")
  expect_type(x$stats_complaints, "integer")
  expect_type(x$stats_blocked, "integer")
})
# knitr chunk defaults for this vignette.
# Fix: the 'comment' string literal was truncated in the source (an
# unterminated `comment = "` swallowed the closing parenthesis and made the
# file unparseable); restored to the conventional knitr output prefix "#>".
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
# DWBmodelUN example: run the Dynamic Water Balance model over the Sogamoso
# basin using the packaged rasters and parameter sets.
library(DWBmodelUN)
library(raster)
# Verify precipitation and PET rasters share the same grid/extent.
Coord_comparison(P_sogamoso, PET_sogamoso)
data(GRU, param)
# Build parameter rasters per Grouped Response Unit.
GRU.maps <- buildGRUmaps(GRU, param)
alpha1_v <- GRU.maps$alpha1
alpha2_v <- GRU.maps$alpha2
smax_v <- GRU.maps$smax
d_v <- GRU.maps$d
# Initial ground and soil-storage states derived from smax.
init <- init_state(GRU.maps$smaxR)
g_v <- init$In_ground
s_v <- init$In_storage
rm(init)
setup_data <- readSetup(Read = TRUE)
# Monthly date axis recovered from the P_sogamoso column names
# (columns 1-2 are coordinates, hence the +2 offsets below).
Dates <- seq(as.Date( gsub('[^0-9.]','',colnames(P_sogamoso)[3]), format = "%Y.%m.%d"),
             as.Date(gsub('[^0-9.]','',tail(colnames(P_sogamoso),1)) , format = "%Y.%m.%d"), by = "month")
Start.sim <- which(Dates == setup_data[8,1]); End.sim <- which(Dates == setup_data[10,1])
Sim.Period <- c(Start.sim:End.sim)+2
DWB.sogamoso <- DWBCalculator(P_sogamoso[ ,Sim.Period],
                              PET_sogamoso[ ,Sim.Period],
                              g_v, s_v, alpha1_v, alpha2_v, smax_v, d_v)
# Calibration setup: reload data, map model cells to basins, and define the
# simulation and calibration windows before running the DDS optimizer.
library(DWBmodelUN)
library(raster)
data(P_sogamoso, PET_sogamoso)
Coord_comparison(P_sogamoso, PET_sogamoso)
data(GRU,basins)
# Which raster cells fall in each basin (used to aggregate runoff).
cellBasins <- cellBasins(GRU, basins)
GRU.maps <- buildGRUmaps(GRU, param)
init <- init_state(GRU.maps$smaxR)
g_v <- init$In_ground
s_v <- init$In_storage
rm(init)
setup_data <- readSetup(Read = TRUE)
Dates <- seq(as.Date( gsub('[^0-9.]','',colnames(P_sogamoso)[3]), format = "%Y.%m.%d"),
             as.Date(gsub('[^0-9.]','',tail(colnames(P_sogamoso),1)) , format = "%Y.%m.%d"), by = "month")
Start.sim <- which(Dates == setup_data[8,1])
End.sim <- which(Dates == setup_data[11,1])
# +2 offsets skip the two coordinate columns of the data matrices.
Sim.Period <- c(Start.sim:End.sim)+2
Start.cal <- which(Dates == setup_data[9,1])
End.cal <- which(Dates == as.Date("2004-12-01"))
Cal.Period <- c(Start.cal:End.cal)+2
# Observed runoff per basin, for the objective function below.
data(EscSogObs)
NSE_Sogamoso_DWB <- function(parameters, P, PET, g_v,s_v, Sim.Period, EscObs, Cal.Period){
  # Objective function for dds(): run DWB with a candidate parameter vector
  # and return the negated mean Nash-Sutcliffe efficiency over all basins
  # (dds minimizes, so smaller is better; 1e100 penalizes invalid runs).
  # NOTE(review): relies on GRU, P_sogamoso, PET_sogamoso and cellBasins
  # from the calling environment rather than its P/PET arguments -- the P
  # and PET parameters are effectively unused; confirm before reuse.
  parameters <- as.vector(parameters)
  # One row of parameters per GRU class.
  param <- matrix(parameters, nrow = raster::cellStats(GRU,stat="max"))
  GRU.maps <- buildGRUmaps(GRU, param)
  alpha1_v <- GRU.maps$alpha1
  alpha2_v <- GRU.maps$alpha2
  smax_v <- GRU.maps$smax
  d_v <- GRU.maps$d
  DWB.sogamoso <- DWBCalculator(P_sogamoso[ ,Sim.Period], PET_sogamoso[ ,Sim.Period],
                                g_v,s_v, alpha1_v, alpha2_v, smax_v,d_v, calibration = TRUE)
  # Aggregate simulated total runoff to basin averages.
  Esc.Sogamoso <- varBasins(DWB.sogamoso$q_total, cellBasins$cellBasins)
  sim <- Esc.Sogamoso$varAverage[Cal.Period - 2, ]
  obs <- EscSogObs[Cal.Period - 2, ]
  # Only compute NSE when the simulation produced no NAs at all.
  if (sum(!is.na(sim)) == prod(dim(sim))){
    numer <- apply((sim - obs)^2, 2, sum, na.rm = TRUE)
    demom <- apply((obs - apply(obs, 2, mean, na.rm = TRUE))^2, 2, sum, na.rm = TRUE)
    nse.cof <- 1 - numer / demom
  } else {
    nse.cof <- NA
  }
  # Negate: dds() minimizes, NSE is maximized at 1.
  Perf <- (-1)*nse.cof
  if(!is.na(mean(Perf))){
    Mean.Perf <- mean(Perf)
  } else {Mean.Perf <- 1e100}
  return(Mean.Perf)
}
# Parameter bounds: 30 parameters bounded by 1 (alpha/d fractions) and 10
# by 2000 (smax-like storages), then a short DDS run (numIter = 2 for the
# vignette) followed by plotting examples with graphDWB().
xBounds.df <- data.frame(lower = rep(0, times = 40), upper = rep(c(1, 2000), times = c(30, 10)))
result <- dds(xBounds.df = xBounds.df, numIter=2, OBJFUN=NSE_Sogamoso_DWB,
              P = P_sogamoso, PET = PET_sogamoso, g_v = g_v, s_v = s_v, Sim.Period = Sim.Period,
              EscObs = EscSogObs, Cal.Period = Cal.Period)
# --- Plot type 1: a single precipitation series -------------------------
# (note: 'star' relies on partial matching of ts()'s 'start' argument)
library(DWBmodelUN)
library(dygraphs)
data(P_sogamoso)
P.est <- ts(c(t(P_sogamoso[1, -2:-1])), star = c(2012, 1), frequency = 12)
var <- list("Precipitation" = P.est)
graphDWB(var, tp = 1, main = "Precipitation Lat:7.0 Lon:-72.94")
# --- Plot type 3: precipitation vs simulated and observed runoff --------
library(DWBmodelUN)
library(dygraphs)
data(P_sogamoso, simDWB.sogamoso, EscSogObs)
P.est <- ts(c(t(P_sogamoso[1, -2:-1])), star = c(2012, 1), frequency = 12)
runoff.sim <- ts(simDWB.sogamoso[c(131:192) ,1], star = c(2012, 1), frequency = 12)
runoff.obs <- ts(EscSogObs[c(131:192) ,1] , star = c(2012, 1), frequency = 12)
var <- list("Precipitation" = P.est,"Runoff.sim" = runoff.sim, "Runoff.obs" = runoff.obs)
graphDWB(var, tp = 3, main = "DWB results at Sogamoso Basin")
# --- Plot type 4: P, PET and simulated runoff together ------------------
library(DWBmodelUN)
library(dygraphs)
data(P_sogamoso, PET_sogamoso, simDWB.sogamoso)
P <- ts(c(t(P_sogamoso[1, -2:-1])), star = c(2012, 1), frequency = 12)
PET <- ts(c(t(PET_sogamoso[1, -2:-1])), star = c(2012, 1), frequency = 12)
runoff.sim <- ts(simDWB.sogamoso[c(131:192), 1], star = c(2012, 1), frequency = 12)
var <- list("P" = P,"PET" = PET, "Runoff.sim" = runoff.sim)
graphDWB(var, tp = 4, main = "General Comparison Sogamoso Basin")
# Regression tests for the sequent-peak pooling algorithm against the
# reference values published by Tallaksen (storage and summary CSVs).
context("Sequent Peak Algorithm")
# Reference storage series; rows 425:1883 restrict to the evaluated window.
sp.storage <- read.csv2("tallaksen-sequent-peak-storage.csv")[425:1883, ]
sp.storage$time <- as.Date(sp.storage$time, format = "%d.%m.%y")
# Reference event summary, limited to events starting inside the window.
sp.summary <- read.csv2("tallaksen-sequent-peak-summary.csv")
sp.summary$start <- as.Date(sp.summary$start, format = "%d.%m.%Y")
sp.summary <- sp.summary[sp.summary$start >=min(sp.storage$time) &
sp.summary$start <= max(sp.storage$time), ]
# Build the discharge series (m^3/s) and pool droughts below a fixed
# threshold of 5.18.
ng <- xts(x = data.frame(discharge = sp.storage$streamflow),
order.by = sp.storage$time)
flowunit(ng) <- "m^3/s"
ng <- .check_xts(ng)
deficit <- pool_sp(find_droughts(ng, threshold = 5.18))
test_that("internal storage is computed correctly", {
expect_equal(nrow(deficit), nrow(sp.storage))
# Rebuild the per-event cumulative storage from the deficit increases
# and compare against the reference (scaled from days to seconds).
deficit$storage <- 0
for(i in setdiff(unique(deficit$event.no), 0)) {
rng <- deficit$event.no == i
deficit$storage[rng] <- cumsum(deficit$def.increase[rng])
}
expect_equal2(as.vector(deficit$storage), sp.storage$storage * 86400,
tolerance = 1e-2,
label = "Deficit volumes given in Tallakesen is equal to computed deficit volumes")
})
test_that("deficit volume is computed correctly", {
expect_equal(summary(deficit, drop_minor = 0)$volume,
sp.summary$def.volume * 86400, tolerance = 1e-3)
})
test_that("deficit duration is computed correctly", {
expect_equal(summary(deficit, drop_minor = 0)$duration,
sp.summary$duration)
})
corCFA <-
function(mimm=NULL, R=mycor, data=d, fac.names=NULL,
         Rmd=NULL, explain=getOption("explain"),
         interpret=getOption("interpret"), results=getOption("results"),
         labels=c("include", "exclude", "only"),
         min_cor=.10, min_res=.05, iter=50, grid=TRUE,
         resid=TRUE, item_cor=TRUE, sort=TRUE,
         main=NULL, heat_map=TRUE, bottom=NULL, right=NULL,
         pdf_file=NULL, width=5, height=5,
         F1=NULL, F2=NULL, F3=NULL, F4=NULL, F5=NULL,
         F6=NULL, F7=NULL, F8=NULL, F9=NULL, F10=NULL,
         F11=NULL, F12=NULL, F13=NULL, F14=NULL, F15=NULL,
         F16=NULL, F17=NULL, F18=NULL, F19=NULL, F20=NULL,
         fun_call=NULL, ...) {

  # Confirmatory factor analysis of a multiple-indicator measurement model,
  # estimated from the item correlation matrix R via the internal .mimm
  # routine.  Factors are specified either as a lavaan-style model string
  # (mimm) or as item-index vectors F1..F20.  Returns an "out_all" list of
  # formatted text sections plus the factor/item correlations, reliability
  # coefficients (alpha, and omega when iter > 0), predicted correlations,
  # and residuals.
  #
  # Bug fixes relative to the previous revision:
  #  * Label was built with F20 (the raw, unevaluated argument) instead of
  #    F20n (the evaluated item vector), so a 20th factor was mishandled.
  #  * The mimm branch tested nchar(vars[i] == 0) -- i.e. nchar("TRUE"/
  #    "FALSE"), which is always > 0 -- and assigned to a dead local.

  # Accept legacy dot-separated argument names (e.g. heat.map) by copying
  # each onto the corresponding underscore name in this frame.
  dots <- list(...)
  if (!is.null(dots)) if (length(dots) > 0) {
    for (i in 1:length(dots)) {
      if (grepl(".", names(dots)[i], fixed=TRUE)) {
        nm <- gsub(".", "_", names(dots)[i], fixed=TRUE)
        assign(nm, dots[[i]])
        get(nm)
      }
    }
  }

  # Record the data frame's name only when it exists as a global object.
  if (exists(deparse(substitute(data)), where=.GlobalEnv, inherits=FALSE))
    dname <- deparse(substitute(data))
  else
    dname <- NULL

  if (is.null(fun_call)) fun_call <- match.call()

  labels <- match.arg(labels)

  # Ensure the heat-map output file carries a .pdf extension.
  if (!is.null(pdf_file))
    if (!grepl(".pdf", pdf_file)) pdf_file <- paste(pdf_file, ".pdf", sep="")

  # Width of the longest user-supplied factor name (0 when none given).
  max.fname <- 0
  if (!is.null(fac.names)) {
    for (i in 1:length(fac.names))
      if (nchar(fac.names[i]) > max.fname) max.fname <- nchar(fac.names[i])
  }

  # R Markdown generation needs access to the original data frame.
  if (is.null(dname) && !is.null(Rmd)) {
    cat("\n"); stop(call.=FALSE, "\n","------\n",
      "Need to read from the data table (frame) to generate a Rmd_ \n\n")
  }

  df.name <- deparse(substitute(data))
  options(dname = df.name)

  NFmax <- 20  # hard upper limit on the number of factors (F1..F20)

  # Item names/count come from the correlation matrix, unless only the
  # variable labels are requested, in which case they come from the data.
  if (labels!="only") {
    NVOld <- as.integer(nrow(R))
    vars.all <- as.list(seq_along(as.data.frame(R)))
    names(vars.all) <- names(as.data.frame(R))
    nm <- dimnames(R)[[1]]
  }
  else {
    NVOld <- nrow(data)
    vars.all <- as.list(seq_along(data))
    names(vars.all) <- names(data)
    nm <- names(data)
  }

  if (!is.null(mimm)) {
    # Parse a lavaan-style measurement model string: strip blanks and "~",
    # then split into one "Fname=v1,v2,..." specification per line.
    nm.mimm <- deparse(substitute(mimm))
    c <- ""
    for (i in 1:nchar(mimm)) {
      s <- substr(mimm,i,i)
      if (s!=" " && s!="~" ) c <- paste(c,s, sep="")
    }
    c <- strsplit(c, "\n")[[1]]
    d <- c[which(nchar(c)>0)]
    NF <- length(d)
    fac.names <- character(length=NFmax)
    vars <- character(length=NFmax)
    for (i in 1:NF) {
      f <- strsplit(d[i], "=")[[1]]
      fac.names[i] <- f[1]
      vars[i] <- gsub("+", ",", f[2], fixed=TRUE)
      vars[i] <- paste("c(", vars[i], ")", sep="")
    }
    # Resolve each item specification against the variable-name list.
    F1n <- eval(parse(text=vars[1]), vars.all, parent.frame())
    F2n <- eval(parse(text=vars[2]), vars.all, parent.frame())
    F3n <- eval(parse(text=vars[3]), vars.all, parent.frame())
    F4n <- eval(parse(text=vars[4]), vars.all, parent.frame())
    F5n <- eval(parse(text=vars[5]), vars.all, parent.frame())
    F6n <- eval(parse(text=vars[6]), vars.all, parent.frame())
    F7n <- eval(parse(text=vars[7]), vars.all, parent.frame())
    F8n <- eval(parse(text=vars[8]), vars.all, parent.frame())
    F9n <- eval(parse(text=vars[9]), vars.all, parent.frame())
    F10n <- eval(parse(text=vars[10]), vars.all, parent.frame())
    F11n <- eval(parse(text=vars[11]), vars.all, parent.frame())
    F12n <- eval(parse(text=vars[12]), vars.all, parent.frame())
    F13n <- eval(parse(text=vars[13]), vars.all, parent.frame())
    F14n <- eval(parse(text=vars[14]), vars.all, parent.frame())
    F15n <- eval(parse(text=vars[15]), vars.all, parent.frame())
    F16n <- eval(parse(text=vars[16]), vars.all, parent.frame())
    F17n <- eval(parse(text=vars[17]), vars.all, parent.frame())
    F18n <- eval(parse(text=vars[18]), vars.all, parent.frame())
    F19n <- eval(parse(text=vars[19]), vars.all, parent.frame())
    F20n <- eval(parse(text=vars[20]), vars.all, parent.frame())
    # Factors beyond NF have an empty specification, so their item set is
    # NULL.  (Fix: the original tested nchar(vars[i] == 0) and assigned to
    # a throw-away local, which had no effect.)
    for (i in 1:NFmax) {
      if (nchar(vars[i]) == 0)
        assign(paste("F", toString(i), "n", sep=""), NULL)
    }
  }
  else {
    # Item vectors supplied directly as F1=c(...), F2=c(...), ...
    F1n <- eval(substitute(F1), vars.all, parent.frame())
    F2n <- eval(substitute(F2), vars.all, parent.frame())
    F3n <- eval(substitute(F3), vars.all, parent.frame())
    F4n <- eval(substitute(F4), vars.all, parent.frame())
    F5n <- eval(substitute(F5), vars.all, parent.frame())
    F6n <- eval(substitute(F6), vars.all, parent.frame())
    F7n <- eval(substitute(F7), vars.all, parent.frame())
    F8n <- eval(substitute(F8), vars.all, parent.frame())
    F9n <- eval(substitute(F9), vars.all, parent.frame())
    F10n <- eval(substitute(F10), vars.all, parent.frame())
    F11n <- eval(substitute(F11), vars.all, parent.frame())
    F12n <- eval(substitute(F12), vars.all, parent.frame())
    F13n <- eval(substitute(F13), vars.all, parent.frame())
    F14n <- eval(substitute(F14), vars.all, parent.frame())
    F15n <- eval(substitute(F15), vars.all, parent.frame())
    F16n <- eval(substitute(F16), vars.all, parent.frame())
    F17n <- eval(substitute(F17), vars.all, parent.frame())
    F18n <- eval(substitute(F18), vars.all, parent.frame())
    F19n <- eval(substitute(F19), vars.all, parent.frame())
    F20n <- eval(substitute(F20), vars.all, parent.frame())
    # Count the non-empty factor specifications.
    NF <- 0
    for (i in 1:NFmax) {
      fnum <- eval(parse(text=paste("F", toString(i), "n", sep="")))
      if (!is.null(fnum)) NF <- NF + 1
    }
  }

  # All item indices, listed factor by factor.
  # (Fix: the last element was F20, the raw argument, instead of F20n.)
  Label <- c(F1n,F2n,F3n,F4n,F5n,F6n,F7n,F8n,F9n,F10n,F11n,F12n,
             F13n,F14n,F15n,F16n,F17n,F18n,F19n,F20n)

  if (NF == 0) {
    cat("\n"); stop(call.=FALSE, "\n","------\n",
      "Number of Factors: ", NF, "\n",
      "Need to specify some factors.", "\n\n",
      "For example, F1=c(...), F2=c(...), etc.\n\n")
  }
  if (!is.null(fac.names)) if (length(fac.names) < NF) {
    cat("\n"); stop(call.=FALSE, "\n","------\n",
      "Only ", length(fac.names), " factor names entered for ", NF,
      " factors\n\n")
  }

  # LblCut[i, ]: first and last position of factor i's items within Label.
  LblCut <- matrix(nrow=NF, ncol=2)
  NItems <- 0
  for (i in 1:NF) {
    LblCut[i,1] <- NItems + 1
    cFac <- eval(parse(text=paste("F", toString(i), "n", sep="")))
    if (length(cFac) == 0) {
      cat("\n"); stop(call.=FALSE, "\n","------\n",
        "Factor Number ", i, " has no items.\n",
        "Each factor must have at least one item.\n\n")
    }
    NItems <- NItems + length(cFac)
    LblCut[i,2] <- NItems
  }

  # Every referenced item must exist in the correlation matrix.
  for (i in 1:NItems) {
    if (Label[i] > NVOld) {
      cat("\n"); stop(call.=FALSE, "\n","------\n",
        "Number of items in correlation matrix: ", NVOld, "\n",
        "Item number in Factor specification: ", Label[i], "\n\n",
        "Specified item does not exist in this correlation matrix.\n\n")
    }
  }

  txlbl <- ""
  if (labels == "only") {
    # Only list each factor with its items and their variable labels.
    tx <- character(length = 0)
    tx[length(tx)+1] <- ""
    for (i in 1:NF) {
      tx[length(tx)+1] <- paste("F", toString(i), sep="")
      if (max.fname > 0)
        tx[length(tx)] <- paste(tx[length(tx)], " - ", fac.names[i], sep="")
      tx[length(tx)+1] <- .dash2(30)
      for (j in LblCut[i,1]:LblCut[i,2]) {
        options(xname = nm[Label[j]])
        tx[length(tx)+1] <- paste(nm[Label[j]], ": ", .getlabels()$xl, sep="")
      }
      tx[length(tx)+1] <- ""
    }
    txlbl <- tx
    class(txlbl) <- "out"
    output <- list(out_labels=txlbl)
  }
  else {
    # Reorder the correlation matrix to the specified item order, then pad
    # with NF zero rows/columns to receive the factor correlations.
    outR <- R[Label,Label]
    nm_new <- colnames(outR)
    cc <- as.character(dimnames(outR)[[1]])
    max.chr <- 0
    for (i in 1:NItems)
      if (nchar(cc[i]) > max.chr) max.chr <- nchar(cc[i])
    if (max.chr < 4) max.chr <- 4
    rr <- matrix(rep(0, NF*NItems), nrow=NF)
    cc <- matrix(rep(0, NF*(NItems+NF)), nrow=(NItems+NF))
    outR <- cbind(rbind(outR,rr),cc)
    alpha <- numeric(length=NF)
    omega <- numeric(length=NF)
    # Centroid estimation of the measurement model.
    out <- .mimm(outR, LblCut, NItems, NF, iter)
    nmF <- character(length=NF)
    for (i in 1:NF) nmF[i] <- paste("F", toString(i), sep="")
    NVTot <- NItems + NF
    nm <- character(length=NVTot)
    nm <- c(nm_new, nmF)
    dimnames(out$R) <- list(nm, nm)

    if (sort) {
      # Within each factor, re-order the items by decreasing loading, then
      # re-run the estimation on the re-ordered matrix.
      pt <- numeric(length=NItems)
      newLabel <- numeric(length=NItems)
      for (ifac in 1:NF) {
        n1 <- LblCut[ifac,1]
        n2 <- LblCut[ifac,2]
        irow <- NItems + ifac
        for (j in n1:n2) pt[j] <- out$R[irow, j]
        o <- order(pt[n1:n2], decreasing=TRUE)
        for (i in 1:(n2-n1+1)) newLabel[n1-1+i] <- Label[n1-1+o[i]]
      }
      Label <- newLabel
      outR <- R[Label,Label]
      nm_new <- colnames(outR)
      rr <- matrix(rep(0, NF*NItems), nrow=NF)
      cc <- matrix(rep(0, NF*(NItems+NF)), nrow=(NItems+NF))
      outR <- cbind(rbind(outR,rr),cc)
      alpha <- numeric(length=NF)
      omega <- numeric(length=NF)
      out <- .mimm(outR, LblCut, NItems, NF, iter)
      nmF <- character(length=NF)
      if (is.null(fac.names))
        for (i in 1:NF) nmF[i] <- paste("F", toString(i), sep="")
      else
        for (i in 1:NF) nmF[i] <- fac.names[i]
      nm <- character(length=NVTot)
      nm <- c(nm_new, nmF)
      dimnames(out$R) <- list(nm, nm)
    }

    if (heat_map) {
      if (is.null(main)) main <- ""
      .opendev(pdf_file, width, height)
      .corcolors(out$R, NItems, main, bottom, right, diag=NULL,
                 pdf_file, width, height)
    }

    # ---- Factor / scale composition section -------------------------------
    title_scales <- "  FACTOR / SCALE COMPOSITION"
    txlbl <- ""
    tx <- character(length = 0)
    # Item labels are shown only if at least one item has a label.
    anyLabels <- FALSE
    for (i in 1:NItems) {
      options(xname = nm_new[i])
      if (!is.null(.getlabels()$xl)) anyLabels <- TRUE
    }
    for (i in 1:NF) {
      tx[length(tx)+1] <- paste("F", toString(i), sep="")
      if (max.fname > 0)
        tx[length(tx)] <- paste(tx[length(tx)], " - ", fac.names[i], sep="")
      if (!anyLabels) {
        for (j in LblCut[i,1]:LblCut[i,2])
          tx[length(tx)] <- paste(tx[length(tx)], " ", nm_new[j])
      }
      else {
        tx[length(tx)+1] <- .dash2(30)
        for (j in LblCut[i,1]:LblCut[i,2]) {
          options(xname = nm_new[j])
          tx[length(tx)+1] <- paste(nm_new[j], ": ", xW(.getlabels()$xl), sep="")
        }
      }
      if (i < NF) tx[length(tx)+1] <- ""
    }
    txlbl <- tx

    # ---- Reliability section ---------------------------------------------
    title_rel <- "  RELIABILITY ANALYSIS"
    txrel <- ""
    tx <- character(length = 0)
    if (iter > 0) {
      # Omega is only available when the iterative solution was run.
      buf <- ifelse(max.fname > 0, max.fname+6, 3)
      tx[length(tx)+1] <- paste(" Scale", paste(rep(" ", buf), collapse=""),
         "Alpha   Omega", sep="")
      tx[length(tx)+1] <- .dash2(23)
      if (max.fname > 0) tx[length(tx)] <- paste(tx[length(tx)],
          .dash2(max.fname+3), sep="")
    }
    else {
      tx[length(tx)+1] <- " Scale   Alpha"
      tx[length(tx)+1] <- .dash2(14)
    }
    for (i in 1:NF) {
      Fnm <- paste("F", as.character(i), sep="")
      tx[length(tx)+1] <- paste(" ", Fnm)
      if (max.fname > 0)
        tx[length(tx)] <- paste(tx[length(tx)], " - ",
          .fmtc(fac.names[i], w=max.fname, j="left"), sep="")
      tx[length(tx)] <- paste(tx[length(tx)], " ", .fmt(out$Alpha[i],3, w=6))
      if (iter > 0)
        tx[length(tx)] <- paste(tx[length(tx)], " ", .fmt(out$Omega[i],3, w=6))
      else
        out$Omega <- NULL
    }
    txrel <- tx

    # ---- Solution section: indicator diagnostics -------------------------
    title_sol <- "  SOLUTION"
    txind <- ""
    tx <- character(length = 0)
    if (iter > 0 ) {
      MaxLbl <- NItems
      buf <- max.chr - 4
      if (buf < 0) buf <- 0
      if (is.null(options()$knitr.in.progress))
        tx[length(tx)+1] <- paste('Indicator Analysis\n')
      tx[length(tx)+1] <- paste('Fac', ' Indi', .fmtc(" ",buf+1), 'Pat', ' Unique',
        ' Factors with which an indicator correlates too')
      tx[length(tx)+1] <- paste('tor', ' cator', .fmtc("",buf), 'tern', ' ness',
        '   highly, and other indicator diagnostics.')
      tx[length(tx)+1] <- .dash2(75)
      for (IFac in 1:NF) {
        tx[length(tx)+1] <- ""
        Fnm <- paste("F", as.character(IFac), sep="")
        for (Item in LblCut[IFac,1]:LblCut[IFac,2]) {
          Bad <- integer(length=0)
          # Loading of the item on its own factor, and its uniqueness.
          Lam <- out$R[NItems+IFac,Item]
          Unique <- 1 - Lam**2
          if (Lam <= 0)
            unq <- "  xxxx"
          else
            unq <- .fmt(Unique,3,7)
          tx[length(tx)+1] <- paste(.fmtc(Fnm,3), .fmtc(nm_new[Item],max.chr),
            .fmt(Lam,3,7), unq, " ")
          if (Lam>0 && Unique>0) {
            # Flag factors on which this item loads higher than its own.
            for (I in 1:NF)
              if (abs(out$R[NItems+I,Item]) > Lam) Bad[length(Bad)+1] <- I
            if (length(Bad) > 0) for (IBad in 1:length(Bad))
              tx[length(tx)] <- paste(tx[length(tx)], paste("F", Bad[IBad], " ",
                sep=""))
          }
          else if (Lam <= 0)
            tx[length(tx)] <- paste(tx[length(tx)],
              '** Negative Loading on Own Factor **')
          else if (Unique <= 0) {
            if (LblCut[IFac,2]-LblCut[IFac,1] > 0)
              tx[length(tx)] <- paste(tx[length(tx)], '** Improper Loading **')
            else
              tx[length(tx)] <- paste(tx[length(tx)],
                '** Factor Defined by Only One Item **')
          }
          Bad <- rep(0, NItems)
        }
      }
      txind <- tx
    }

    # ---- Solution section: item/factor correlations ----------------------
    txsol <- ""
    tx <- character(length = 0)
    if (iter > 0)
      if (is.null(options()$knitr.in.progress))
        tx[length(tx)+1] <- "Factor / Item Correlations \n"
    else
      if (is.null(options()$knitr.in.progress))
        tx[length(tx)+1] <- "Item-Scale and Scale-Scale Correlations\n"
    if(grid) boundary <- LblCut[,2] else boundary <- NULL
    if (item_cor)
      txcrs <- .prntbl(out$R, 2, cut=min_cor, cc=NULL, cors=TRUE, bnd=boundary)
    else
      txcrs <- .prntbl(out$R[1:NVTot,(NItems+1):NVTot], 2, cut=min_cor,
                       cc=NULL, cors=TRUE, bnd=boundary)
    for (i in 1:length(txcrs)) tx[length(tx)+1] <- txcrs[i]
    txsol <- tx

    # ---- Residuals section -----------------------------------------------
    title_res <- "  RESIDUALS"
    txres <- ""
    txrst <- ""
    if (resid) {
      tx <- character(length = 0)
      # Reconstruct the implied item correlations: lambda %*% phi %*% t(lambda)
      # with unit diagonal, then subtract from the observed correlations.
      phi <- out$R[(NItems+1):(NItems+NF), (NItems+1):(NItems+NF)]
      lambda <- matrix(0, nrow=NItems, ncol=NF)
      iFac <- 1
      for (i in 1:NItems) {
        if (i > LblCut[iFac,2]) iFac <- iFac + 1
        lambda[i, iFac] <- out$R[i, NItems+iFac]
      }
      est <- lambda %*% phi %*% t(lambda)
      diag(est) <- 1.0
      est <- round(est, 5)
      colnames(est) <- row.names(out$R[1:NItems,1:NItems])
      rownames(est) <- colnames(est)
      res <- out$R[1:NItems,1:NItems] - est
      diag(res) <- 0
      res <- round(res, 5)
      if (is.null(options()$knitr.in.progress))
        tx[length(tx)+1] <- "Item residuals\n"
      txcrs <- .prntbl(res, 2, cut=min_res, cc=NULL, cors=TRUE, bnd=boundary)
      for (i in 1:length(txcrs)) tx[length(tx)+1] <- txcrs[i]
      txres <- tx

      # Per-item residual summaries plus overall fit statistics.
      tx <- character(length = 0)
      if (is.null(options()$knitr.in.progress))
        tx[length(tx)+1] <- "Residual summaries\n"
      tx[length(tx)+1] <- paste(.fmtc(" ", max.chr+2), "Sum of   Average", "\n",
        .fmtc(" ", max.chr+2), "Squares  Abs Value", "\n",
        .fmtc(" ", max.chr+2), "-------  ---------", sep="")
      cc <- as.character(dimnames(res)[[1]])
      res_avg <- double(length=NItems)
      ssq.tot <- 0
      abv.tot <- 0
      abv.all <- 0
      for (i in 1:NItems) {
        ssq <- 0
        abv <- 0
        for (j in 1:NItems) {
          ssq <- ssq + res[i,j]^2
          abv <- abv + abs(res[i,j])
          abv.all <- abv.all + abs(res[i,j])
        }
        ssq.tot <- ssq.tot + ssq
        res_avg[i] <- abv / (NItems - 1)
        tx[length(tx)+1] <- paste(.fmtc(cc[i],max.chr), " ", .fmt(ssq,3), " ",
          .fmt(res_avg[i],3))
      }
      abv.all.tot <- abv.all / (NItems^2 - NItems)
      tx[length(tx)+1] <- ""
      tx[length(tx)+1] <- paste("Total sum of squares for all items:",
        .fmt(ssq.tot,3), "\n")
      tx[length(tx)+1] <- paste("Root mean square residual for all items:",
        .fmt(sqrt(ssq.tot),3), "\n")
      tx[length(tx)+1] <- paste("Average absolute residual w/o the diagonal:",
        .fmt(abv.all.tot,3), "\n\n")
      txrst <- tx
    }
    else {
      res <- NULL
      est <- NULL
    }

    # ---- lavaan model specification section -------------------------------
    title_lvn <- "  LAVAAN SPECIFICATION"
    txlvn <- ""
    if (iter > 0) {
      # Emit ready-to-run lavaan code for the same measurement model.
      tx <- character(length = 0)
      if (is.null(mimm)) nm.mimm <- "MeasModel"
      tx[length(tx)+1] <- paste(nm.mimm, " <-")
      tx[length(tx)+1] <- paste("\"")
      for (i in 1:NF) {
        if (max.fname > 0) nmF[i] <- fac.names[i]
        tx[length(tx)+1] <- paste("  ", nmF[i], " =~", sep="")
        for (j in LblCut[i,1]:LblCut[i,2]) {
          if (j == LblCut[i,1])
            tx[length(tx)] <- paste(tx[length(tx)], " ", nm_new[j], sep="")
          else
            tx[length(tx)] <- paste(tx[length(tx)], "+", nm_new[j])
        }
      }
      tx[length(tx)+1] <- paste("\"\n")
      tx[length(tx)+1] <- paste("library(lavaan)")
      tx[length(tx)+1] <- paste("fit <- lavaan::cfa(", nm.mimm, ", data=d,",
          " std.ov=TRUE, std.lv=TRUE)", sep="")
      tx[length(tx)+1] <- "summary(fit, fit.measures=TRUE)"
      tx[length(tx)+1] <- ""
      tx[length(tx)+1] <- "--------"
      tx[length(tx)+1] <- paste(">>> The preceding code fits the model from",
         "data frame: d")
      tx[length(tx)+1] <- paste(">>> To access the correlation matrix",
         "directly without the data")
      tx[length(tx)+1] <- paste(">>> use the following fit statement instead.\n")
      tx[length(tx)+1] <- paste("fit <- lavaan::cfa(", nm.mimm,
        ", sample.cov=mycor$R,", " sample.nobs=nnn, std.lv=TRUE)\n", sep="")
      tx[length(tx)+1] <- ">>> mycor: name of correlation matrix"
      tx[length(tx)+1] <- ">>> nnn: numeric, number of observations"
      txlvn <- tx
    }

    # ---- Optional R Markdown file -----------------------------------------
    txkfl <- ""
    if (!is.null(Rmd)) {
      if (!grepl(".Rmd", Rmd)) Rmd <- paste(Rmd, ".Rmd", sep="")
      txknt <- .corfa.Rmd(mimm, nm.mimm, dname, fun_call, NItems, NF,
                          iter, item_cor, explain, interpret, results)
      cat(txknt, file=Rmd, sep="\n")
      txkfl <- .showfile2(Rmd, "R Markdown instructions")
    }

    # Tag every text section so the lessR print methods format them.
    class(title_scales) <- "out"
    class(txlbl) <- "out"
    class(title_rel) <- "out"
    class(txrel) <- "out"
    class(title_sol) <- "out"
    class(txind) <- "out"
    class(txsol) <- "out"
    class(title_res) <- "out"
    class(txres) <- "out"
    class(txrst) <- "out"
    class(title_lvn) <- "out"
    class(txlvn) <- "out"
    class(txkfl) <- "out"

    output <- list(
      call=fun_call,
      out_title_scales=title_scales, out_labels=txlbl,
      out_title_rel=title_rel, out_reliability=txrel,
      out_title_solution=title_sol, out_indicators=txind, out_solution=txsol,
      out_title_residuals=title_res, out_residuals=txres, out_res_stats=txrst,
      out_title_lavaan=title_lvn, out_lavaan=txlvn, out_Rmd=txkfl,
      ff.cor=out$R[(NItems+1):NVTot,(NItems+1):NVTot],
      if.cor=out$R[1:NItems,(NItems+1):NVTot],
      diag.cor=diag(out$R[1:NItems,1:NItems]),
      alpha=out$Alpha,
      omega=out$Omega,
      pred=est,
      resid=res
    )
  }

  class(output) <- "out_all"
  return(output)
}
# mcmapply: parallel analogue of mapply()/Map() built on mclapply().
#
# FUN is applied elementwise over the vectors in ...; shorter inputs are
# recycled to the length of the longest.  With fewer than two elements the
# work is done serially via .mapply() (no fork overhead); otherwise the
# index set is distributed over mc.cores child processes.  MoreArgs,
# SIMPLIFY and USE.NAMES behave as in base::mapply(); the mc.* arguments
# are passed through to mclapply().
#
# Returns a list, or a simplified array when SIMPLIFY is not FALSE.
mcmapply <-
    function(FUN, ..., MoreArgs = NULL, SIMPLIFY = TRUE, USE.NAMES = TRUE,
             mc.preschedule = TRUE, mc.set.seed = TRUE,
             mc.silent = FALSE, mc.cores = getOption("mc.cores", 2L),
             mc.cleanup = TRUE)
{
    FUN <- match.fun(FUN)
    dots <- list(...)
    if(!length(dots)) return(list())
    # lengths() is the type-stable idiom (was sapply(dots, length), whose
    # result type depends on its input).
    lens <- lengths(dots)
    n <- max(lens)
    # Recycling a zero-length vector against non-zero lengths is an error,
    # as in base::mapply().
    if(n && min(lens) == 0L)
        stop("Zero-length inputs cannot be mixed with those of non-zero length")
    answer <- if(n < 2L) .mapply(FUN, dots, MoreArgs)
    else {
        # Recycle every argument up to the common length n.
        X <- if (!all(lens == n))
                 lapply(dots, function(x) rep(x, length.out = n))
             else dots
        # Each worker receives a set of indices and applies FUN serially
        # to the corresponding slices of all arguments.
        do_one <- function(indices, ...) {
            dots <- lapply(X, function(x) x[indices])
            .mapply(FUN, dots, MoreArgs)
        }
        answer <- mclapply(seq_len(n), do_one, mc.preschedule = mc.preschedule,
                           mc.set.seed = mc.set.seed, mc.silent = mc.silent,
                           mc.cores = mc.cores, mc.cleanup = mc.cleanup)
        # Each element of answer is itself a list; flatten one level.
        do.call(c, answer)
    }
    # Name the result from the first argument, as mapply() does: use its
    # values when it is an unnamed character vector, else its names.
    if (USE.NAMES && length(dots)) {
        if (is.null(names1 <- names(dots[[1L]])) && is.character(dots[[1L]]))
            names(answer) <- dots[[1L]]
        else if (!is.null(names1))
            names(answer) <- names1
    }
    if (!identical(SIMPLIFY, FALSE) && length(answer))
        simplify2array(answer, higher = (SIMPLIFY == "array"))
    else answer
}
# mcMap: parallel drop-in replacement for base::Map().
# Always returns a list (SIMPLIFY = FALSE) and suppresses worker output
# (mc.silent = TRUE); everything else is delegated to mcmapply().
mcMap <- function (f, ...)
{
    # Resolve f to a function first, as Map() does (accepts a function,
    # a name, or a character string).
    fn <- match.fun(f)
    mcmapply(fn, ..., SIMPLIFY = FALSE, mc.silent = TRUE)
}
# S4 class "dat": base container for a dataset plus model-fitting state.
# Holds the data matrix (psi.df) and its weights, the time/wavelength axes
# (x/nt and x2/nl), constraint and parameter bookkeeping (fixed, prel,
# clpequspec, ...), and output options.  Slot semantics are presumably those
# of the TIMP model specification -- verify against the package manual.
setClass("dat", representation(psi.df = "matrix", psi.weight = "matrix",
x = "vector", nt = "integer", x2 = "vector", nl = "integer",
C2 = "matrix", E2 = "matrix", sigma = "numeric",
mod_type = "character", parnames = "vector",dummy="vector",
simdata = "logical", weightpar = "list",
weight = "logical", weightM = "matrix", weightsmooth = "list",
fixed = "list", clp0 = "list", makeps = "character",
clpequspec = "list", lclp0 = "logical", lclpequ = "logical",
title = "character", mhist = "list", datCall = "list",
dscalspec = "list", mvecind = "vector", dscal = "list",
drel = "vector", clpequ = "vector", scalx = "numeric",
prel = "vector", prelspec = "list", fvecind = "vector",
pvecind = "vector", nvecind = "vector",
iter = "numeric", free = "list",
clpCon = "list", ncomp = "numeric", clpdep = "logical",
inten = "matrix", positivepar="vector", constrained ="list",
clinde = "list", chinde = "list", highcon = "vector",
lowcon = "vector", datafile = "character", getX = "logical",
clpType = "character", clpequspecBD = "list", compnames = "vector",
getXsuper = "logical", usecompnames0 = "logical",
usecompnamesequ = "logical", autoclp0 = "list", cohcol = "numeric",
weightList = "list", outMat = "matrix", satMat = "matrix",
lscalpar = "logical", thetascal = "vector"),
# Defaults: empty containers throughout; scalx = 1 (no axis scaling),
# weight/getX/getXsuper/lscalpar default to FALSE, iter to 1.
prototype = list(psi.df = matrix(), psi.weight = matrix(),
x = vector(), nt = integer(), x2 = vector(), nl = integer(),
C2 = matrix(), E2 = matrix(), sigma = numeric(),
mod_type = character(), simdata = logical(),
weightpar = list(), weight = FALSE, weightM = matrix(),
weightsmooth = list(), fixed = list(), clp0 = list(),
clpequspec = list(), clpCon = list(), lclp0 = logical(),
lclpequ = logical(), makeps = character(), title = character(),
mhist = list(), datCall = list(), nvecind = vector(),
dscal = list(), drel = vector(), mvecind = vector(),
scalx = 1, prel = vector(), prelspec = list(), fvecind = vector(),
pvecind = vector(), clpequ = vector(), free = list(),
iter = 1, ncomp = numeric(), clpdep = logical(),inten = matrix(),
parnames=vector(), dummy=vector(), positivepar=vector(), constrained =list(),
clinde = list(), chinde = list(), highcon = vector(),
lowcon = vector(), datafile = "", getX = FALSE,
clpType = "", clpequspecBD = list(), compnames = vector(),
getXsuper = FALSE, usecompnames0 = FALSE,
usecompnamesequ = FALSE, autoclp0 = list(), cohcol = 0,
weightList = list(), outMat = matrix(), satMat = matrix(),
lscalpar = FALSE, thetascal = vector() ))
# S4 class "kin": kinetic model, extending "dat".  Adds rate constants
# (kinpar), IRF description (irf, irffun, irfpar, measured_irf), dispersion
# of IRF parameters (dispmu/disptau and friends), a full compartmental
# scheme (fullk, kmat, jvec), coherent-artifact and oscillation
# specifications, and options for numerical integration of the kinetic
# scheme.  Slot meanings presumably follow the TIMP kinetic model docs --
# verify there before relying on them.
setClass("kin", representation("dat", kinpar = "vector", specpar =
"list", seqmod = "logical", irf = "logical", mirf = "logical", reftau
= "numeric", measured_irf = "vector", convalg =
"numeric", irffun = "character", irfpar = "vector", cohirf = "vector",
dispmu = "logical", dispmufun = "character", anipar = "vector", parmu
= "list", disptau = "logical", disptaufun = "character", partau =
"vector", fullk = "logical", kmat = "array", jvec = "vector",
anispec = "list", ncolc = "vector",
kinscal = "vector", kmatfit = "array", cohspec = "list", coh = "vector",
oscspec = "list", oscpar = "vector",
wavedep = "logical", lambdac = "numeric", speckin2 = "list",
usekin2 = "logical", kinpar2 = "vector",
kin2scal = "vector", amplitudes = "vector", streakT = "numeric",
streak="logical", doublegaus = "logical", multiplegaus = "logical", fixedkmat="logical",
kinscalspecialspec ="list", kinscalspecial = "list",
lightregimespec = "list",
numericalintegration = "logical", initialvals = "vector",
reactantstoichiometrymatrix = "vector",
stoichiometrymatrix = "vector"),
# Defaults: sequential model (seqmod = TRUE), no IRF, Gaussian IRF shape
# ("gaus"), polynomial dispersion ("poly"), all optional features off.
prototype = list( kinpar = vector(), seqmod =
TRUE, irf = FALSE, mirf = FALSE, measured_irf = vector(), convalg = 1,
cohirf = vector(), irffun = "gaus", anispec = list(), irfpar =
vector(), dispmu = FALSE, dispmufun = "poly", parmu = list(), anipar =
vector(), disptaufun = "poly", reftau = 0, specpar = list(),
partau = vector(), disptau = FALSE, fullk = FALSE,
kmat = array(), jvec = vector(),
ncolc = vector(), kinscal = vector(), kmatfit = array(),
cohspec = list(), coh = vector(), oscspec = list(), oscpar = vector(), wavedep = logical(),
lambdac = numeric(), speckin2 = list(), usekin2 = FALSE,
kinpar2 = vector(), kin2scal = vector(), amplitudes = vector(),
streakT = 0, streak = FALSE, doublegaus = FALSE, multiplegaus = FALSE, fixedkmat=FALSE,
kinscalspecialspec = list(), kinscalspecial = list(),
lightregimespec = list(), numericalintegration = FALSE,
initialvals = vector(),
reactantstoichiometrymatrix = vector(),
stoichiometrymatrix = vector()
))
# S4 class "mass": mass-spectrometry model, extending "kin".  Adds peak
# shape parameters (peakpar, peakfunct -- default exponentially modified
# Gaussian), an optional baseline file (lzerofile), amplitudes, an extra
# component flag, and per-peak shifts.  Note getX defaults to TRUE here,
# overriding the "dat" prototype.
setClass("mass",
         representation("kin",
                        peakpar = "list",
                        amplitudes = "vector",
                        peakfunct = "character",
                        lzerofile = "character",
                        extracomp = "logical",
                        shift = "vector"),
         prototype = list(
           peakpar = list(),
           peakfunct = "expmodgaus",
           lzerofile = "",
           amplitudes = vector(),
           getX=TRUE,
           extracomp = TRUE,
           shift = vector() )
         )
# S4 class "spec": spectral model, extending "dat".  Describes the spectral
# shape function (specfun, default "gaus"), its parameters and constraints,
# optional spectral dispersion over time (specdisp and friends), and the
# wavenumber power nupow (default 5).
setClass("spec", representation("dat", clpequ = "vector",
specpar = "list", specfun = "character", specref = "numeric",
specCon = "list", ncole = "vector", specdisp = "logical",
specdisppar = "list", specdispindex = "vector", nupow = "numeric",
timedep = "logical", parmufunc = "character"),
prototype = list(specpar = list(), specfun = "gaus",
specCon = list(), ncole = vector(), specdisp = FALSE,
specdisppar = list(), specdispindex = vector(), specref =
numeric(), nupow = 5, clpequ = vector(),
timedep = FALSE, parmufunc = "poly"))
# S4 class "amp": amplitude-only model, extending "dat"; stores the
# amplitude lists and fixes clpdep to FALSE by default.
setClass("amp", representation("dat", amps = "list"),
prototype = list(amps = list(), clpdep = FALSE))
# Prediction at a new predictor value under the weighted partial envelope
# model.  The predictors are rotated so that the first rotated coordinate
# is aligned with Xnew; weighted.penv() is then fit with that coordinate as
# X1 and the remaining p-1 coordinates as X2, and the fitted mean function
# is evaluated at the rotated new observation.
#
# X    : n x p predictor matrix (coerced with as.matrix).
# Y    : n x r response matrix.
# Xnew : new predictor vector (treated as a p x 1 column).
# Returns the r x 1 predicted response.
weighted.pred.env <- function(X, Y, Xnew) {
  X <- as.matrix(X)
  dims <- dim(Y)
  n <- dims[1]       # kept for readability; not used below
  r <- dims[2]       # kept for readability; not used below
  p <- ncol(X)
  Xnew <- as.matrix(Xnew)
  # Ensure Xnew is a column vector (p x 1).
  if (nrow(Xnew) == 1) {
    Xnew <- t(Xnew)
  }
  # Orthogonal basis whose first column spans Xnew; Ainv rotates into it.
  A <- qr.Q(qr(Xnew), complete = TRUE)
  Ainv <- solve(A)
  Z <- tcrossprod(X, Ainv)
  X1 <- Z[, 1]
  X2 <- Z[, 2:p]
  fit <- weighted.penv(X1, X2, Y)
  # Rotate the new observation into the same coordinates and evaluate the
  # fitted regression function there.
  X1new <- Ainv[1, ] %*% Xnew
  X2new <- Ainv[2:p, ] %*% Xnew
  fit$mu + fit$beta1 %*% X1new + fit$beta2 %*% X2new
}
# knitr chunk defaults for this vignette.
# NOTE(review): the original read `comment = "` with an unterminated string
# literal -- the conventional "#>" output prefix was evidently truncated
# when the code was extracted; restored here.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
# Vignette: melting iris into multiple value columns with several packages
# (nc, tidyr, stats::reshape, data.table::melt, cdata), producing one row
# per flower part (Sepal/Petal) and one column per dimension, and vice versa.
library(data.table)
data.table(iris)
# One melt per package; each should yield equivalent long data.
iris.parts <- list(
  nc=nc::capture_melt_multiple(
    iris,
    column=".*?",
    "[.]",
    dim=".*"),
  tidyr=if(requireNamespace("tidyr"))tidyr::pivot_longer(
    iris,
    cols=1:4,
    names_to=c(".value", "dim"),
    names_sep="[.]"),
  stats=stats::reshape(
    iris,
    direction="long",
    timevar="dim",
    varying=1:4,
    sep="."),
  "data.table::melt"=melt(
    data.table(iris),
    measure.vars=patterns(
      Sepal="^Sepal",
      Petal="^Petal")
    # join the melted variable index back to readable dim labels
  )[data.table(
    variable=factor(1:2), dim=c("Length", "Width")
  ), on=.(variable)],
  if(requireNamespace("cdata"))cdata::rowrecs_to_blocks(
    iris,
    controlTable=data.frame(
      dim=c("Length", "Width"),
      Petal=c("Petal.Length", "Petal.Width"),
      Sepal=c("Sepal.Length", "Sepal.Width"),
      stringsAsFactors=FALSE),
    columnsToCopy="Species"))
iris.parts$nc
# Scatter Petal vs Sepal per dimension and species.
if(require(ggplot2)){
  ggplot()+
    theme_bw()+
    theme(panel.spacing=grid::unit(0, "lines"))+
    facet_grid(dim ~ Species)+
    coord_equal()+
    geom_abline(slope=1, intercept=0, color="grey")+
    geom_point(aes(
      Petal, Sepal),
      data=iris.parts$nc)
}
# Same idea, but with part (Sepal/Petal) as the id and Length/Width as the
# output columns; stats::reshape needs the name halves swapped first.
iris.dims <- list(
  nc=nc::capture_melt_multiple(
    iris,
    part=".*?",
    "[.]",
    column=".*"),
  stats=stats::reshape(
    structure(iris, names=sub("(.*?)[.](.*)", "\\2.\\1", names(iris))),
    direction="long",
    timevar="part",
    varying=1:4,
    sep="."))
iris.dims$nc
if(require(ggplot2)){
  ggplot()+
    theme_bw()+
    theme(panel.spacing=grid::unit(0, "lines"))+
    facet_grid(part ~ Species)+
    coord_equal()+
    geom_abline(slope=1, intercept=0, color="grey")+
    geom_point(aes(
      Length, Width),
      data=iris.dims$nc)
}
# Vignette: melting a treatment/control table where each value column name
# encodes both a measure (age/sex) and a group (treatment/control).
TC <- data.table::data.table(
  age.treatment=c(1, 5),
  sex.control=c("M", "M"),
  sex.treatment=c("F", "F"),
  age.control=c(10, 50))
# One melt per package; not all succeed on mixed-type value columns.
input.list <- list(
  "nc"=nc::capture_melt_multiple(
    TC,
    column=".*?",
    "[.]",
    group=".*"),
  "cdata"=if(requireNamespace("cdata"))cdata::rowrecs_to_blocks(
    TC,
    controlTable=data.frame(
      group=c("treatment", "control"),
      age=c("age.treatment", "age.control"),
      sex=c("sex.treatment", "sex.control"),
      stringsAsFactors=FALSE)),
  "data.table"=data.table::melt(TC, measure.vars=patterns(
    age="age",
    sex="sex")),
  "stats"=stats::reshape(
    TC,
    varying=1:4,
    direction="long"),
  "tidyr"=if(requireNamespace("tidyr"))tidyr::pivot_longer(
    TC, 1:4,
    names_to=c(".value", "group"),
    names_sep="[.]"))
# Keep only results that are actual data frames (skipped packages yield NULL).
output.list <- list()
for(pkg in names(input.list)){
  df.or.null <- input.list[[pkg]]
  if(is.data.frame(df.or.null)){
    output.list[[pkg]] <- data.table::data.table(df.or.null)[order(age)]
  }
}
output.list
# Check which packages preserved the sex column values correctly.
sapply(output.list, function(DT)identical(DT$sex, c("F", "F", "M", "M")))
# Vignette: single-value-column melts of the WHO tuberculosis data, whose
# column names encode diagnosis, gender, and an age range.
if(requireNamespace("tidyr")){
  data(who, package="tidyr")
}else{
  # minimal stand-in with the same column-name structure
  who <- data.frame(id=1, new_sp_m5564=2, newrel_f65=3)
}
names(who)
# Character-valued capture groups only.
who.chr.list <- list(
  nc=nc::capture_melt_single(
    who,
    "new_?",
    diagnosis=".*",
    "_",
    gender=".",
    ages=".*"),
  tidyr=if(requireNamespace("tidyr"))tidyr::pivot_longer(
    who,
    new_sp_m014:newrel_f65,
    names_to=c("diagnosis", "gender", "ages"),
    names_pattern="new_?(.*)_(.)(.*)"))
# Pattern splitting the age range into lower/upper bound digits.
who.pattern <- "new_?(.*)_(.)((0|[0-9]{2})([0-9]{0,2}))"
# empty upper bound means "and older" -> Inf
as.numeric.Inf <- function(y)ifelse(y=="", Inf, as.numeric(y))
# Typed capture groups: age bounds converted to numeric on the fly.
who.typed.list <- list(
  nc=nc::capture_melt_single(
    who,
    "new_?",
    diagnosis=".*",
    "_",
    gender=".",
    ages=list(
      ymin.num="0|[0-9]{2}", as.numeric,
      ymax.num="[0-9]{0,2}", as.numeric.Inf),
    value.name="count",
    na.rm=TRUE),
  tidyr=if(requireNamespace("tidyr"))try(tidyr::pivot_longer(
    who,
    cols=grep(who.pattern, names(who)),
    names_transform=list(
      ymin.num=as.numeric,
      ymax.num=as.numeric.Inf),
    names_to=c("diagnosis", "gender", "ages", "ymin.num", "ymax.num"),
    names_pattern=who.pattern,
    values_drop_na=TRUE,
    values_to="count")))
str(who.typed.list)
# Equivalent three-step tidyr pipeline: gather, extract, then convert types.
if(requireNamespace("tidyr")){
  gather.result <- tidyr::gather(
    who,
    "variable",
    "count",
    grep(who.pattern, names(who)),
    na.rm=TRUE)
  extract.result <- tidyr::extract(
    gather.result,
    "variable",
    c("diagnosis", "gender", "ages", "ymin.int", "ymax.int"),
    who.pattern,
    convert=TRUE)
  transform.result <- base::transform(
    extract.result,
    ymin.num=as.numeric(ymin.int),
    ymax.num=ifelse(is.na(ymax.int), Inf, as.numeric(ymax.int)))
  str(transform.result)
}
# reshape2 / data.table / stats / cdata equivalents of the same melt.
reshape2.result <- if(requireNamespace("reshape2")){
  reshape2:::melt.data.frame(
    who,
    measure.vars=grep(who.pattern, names(who)),
    na.rm=TRUE,
    value.name="count")
}
dt.result <- data.table::melt.data.table(
  data.table(who),
  measure.vars=patterns(who.pattern),
  na.rm=TRUE,
  value.name="count")
# stats::reshape needs a common "count." prefix on the varying columns.
who.df <- data.frame(who)
is.varying <- grepl(who.pattern, names(who))
names(who.df)[is.varying] <- paste0("count.", names(who)[is.varying])
stats.result <- stats::reshape(
  who.df,
  direction="long",
  timevar="variable",
  varying=is.varying)
if(requireNamespace("cdata")){
  cdata.result <- cdata::rowrecs_to_blocks(
    who,
    cdata::build_unpivot_control(
      "variable",
      "count",
      grep(who.pattern, names(who), value=TRUE)),
    columnsToCopy=grep(who.pattern, names(who), value=TRUE, invert=TRUE))
}
# Vignette: melting a wide table with treatment./control. column prefixes
# back into the original iris rows, comparing data.table, nc, and stats.
library(data.table)
iris.dt <- data.table(
  i=1:nrow(iris),
  iris[,1:4],
  Species=paste(iris$Species))
print(iris.dt)
# Randomly split the rows into a treatment half and a control half, bound
# side by side so every column appears twice with a group prefix.
set.seed(1)
iris.rand <- iris.dt[sample(.N)]
iris.wide <- cbind(treatment=iris.rand[1:75], control=iris.rand[76:150])
print(iris.wide, topn=2, nrows=10)
# data.table::melt needs one explicit pattern per output column.
iris.melted <- melt(iris.wide, value.factor=TRUE, measure.vars = patterns(
  i="i$",
  Sepal.Length="Sepal.Length$",
  Sepal.Width="Sepal.Width$",
  Petal.Length="Petal.Length$",
  Petal.Width="Petal.Width$",
  Species="Species$"))
# Melting then re-ordering by i should reconstruct the original table.
identical(iris.melted[order(i), names(iris.dt), with=FALSE], iris.dt)
# nc expresses the same melt with a single group/column pattern.
(nc.melted <- nc::capture_melt_multiple(
  iris.wide,
  group="[^.]+",
  "[.]",
  column=".*"))
identical(nc.melted[order(i), names(iris.dt), with=FALSE], iris.dt)
# stats::reshape wants names as column_group, so swap the halves first.
iris.wide.df <- data.frame(iris.wide)
names(iris.wide.df) <- sub("(.*?)[.](.*)", "\\2_\\1", names(iris.wide))
iris.reshaped <- stats::reshape(
  iris.wide.df,
  direction="long",
  timevar="group",
  varying=names(iris.wide.df),
  sep="_")
identical(data.table(iris.reshaped[, names(iris.dt)])[order(i)], iris.dt)
# Three-group capture: group, part (Sepal/Petal), and dimension.
parts.wide <- nc::capture_melt_multiple(
  iris.wide,
  group=".*?",
  "[.]",
  column=".*?",
  "[.]",
  dim=".*")
if(require("ggplot2")){
  ggplot()+
    theme_bw()+
    theme(panel.spacing=grid::unit(0, "lines"))+
    facet_grid(dim ~ group)+
    coord_equal()+
    geom_abline(slope=1, intercept=0, color="grey")+
    geom_point(aes(
      Petal, Sepal),
      data=parts.wide)
}
# Vignette: melting columns of heterogeneous types (integer, factor,
# character, Date, list) named type_number, across packages.
DT <- data.table(
  i_1 = c(1:5, NA),
  i_2 = c(NA,6:10),
  f_1 = factor(sample(c(letters[1:3], NA), 6, TRUE)),
  f_2 = factor(c("z", "a", "x", "c", "x", "x"), ordered=TRUE),
  c_1 = sample(c(letters[1:3], NA), 6, TRUE),
  d_1 = as.Date(c(1:3,NA,4:5), origin="2013-09-01"),
  d_2 = as.Date(6:1, origin="2012-01-01"))
# Two list columns built per-group so elements have varying lengths.
DT[, l_1 := DT[, list(c=list(rep(i_1, sample(5,1)))), by = i_1]$c]
DT[, l_2 := DT[, list(c=list(rep(c_1, sample(5,1)))), by = i_1]$c]
# data.table: one output column per type prefix.
melt(DT, measure=patterns(
  i="^i",
  f="^f",
  d="^d",
  l="^l"
))
# nc: everything except the single character column (c_1 has no pair).
nc::capture_melt_multiple(
  DT,
  column="^[^c]",
  "_",
  number="[12]")
# Subset variants: factors and lists only.
melt(DT, id=1:2, measure=patterns(
  f="^f",
  l="^l"
))
nc::capture_melt_multiple(
  DT,
  column="^[fl]",
  "_",
  number="[12]")
# stats::reshape on the factor/integer/date columns.
reshape(
  DT,
  varying=grep("^[fid]", names(DT)),
  sep="_",
  direction="long",
  timevar="number")
# tidyr: exclude character and factor columns.
if(requireNamespace("tidyr")){
  tidyr::pivot_longer(
    DT, grep("[cf]", names(DT), invert=TRUE),
    names_pattern="(.)_(.)",
    names_to=c(".value", "number"))
}
# Vignette: one row per family with dob_childN / gender_childN columns,
# melted to one row per child (dropping missing children via na.rm).
family.dt <- fread(text="
family_id age_mother dob_child1 dob_child2 dob_child3 gender_child1 gender_child2 gender_child3
1 30 1998-11-26 2000-01-29 NA 1 2 NA
2 27 1996-06-22 NA NA 2 NA NA
3 26 2002-07-11 2004-04-05 2007-09-02 2 2 1
4 32 2004-10-10 2009-08-27 2012-07-21 1 1 1
5 29 2000-12-05 2005-02-28 NA 2 1 NA")
(children.melt <- melt(family.dt, measure = patterns(
  dob="^dob", gender="^gender"
), na.rm=TRUE, variable.factor=FALSE))
# nc::field captures the child number as its own column.
(children.nc <- nc::capture_melt_multiple(
  family.dt,
  column="[^_]+",
  "_",
  nc::field("child", "", "[1-3]"),
  na.rm=TRUE))
# stats::reshape equivalent; note it keeps the all-NA child rows.
stats::reshape(
  family.dt,
  varying=grep("child", names(family.dt)),
  direction="long",
  sep="_",
  timevar="child.str")
# Draw one histogram per iris measurement column on a 2x2 layout, using
# common x and y limits so the four panels are directly comparable.
imat <- as.matrix(iris[, 1:4])
ylim <- range(table(imat))   # span of bin counts across all measurements
xlim <- range(imat)          # common measurement range
par(mfcol=c(2,2), mar=c(2,2,1,1))
for (panel in seq_len(ncol(imat))) {
  measurements <- imat[, panel]
  hist(
    measurements,
    breaks=seq(xlim[1], xlim[2], by=0.1),
    ylim=ylim,
    main=colnames(imat)[panel])
}
# Vignette: melting numeric column names (penalty values) into a numeric
# key column, comparing tidyr, nc, and two stats::reshape strategies.
pen.peaks.wide <- data.table::data.table(
  data.set=c("foo", "bar"),
  "10.1"=c(5L, 10L),
  "0.3"=c(26L, 39L))
# tidyr::gather with convert=TRUE parses the names to numeric.
pen.peaks.gather <- if(requireNamespace("tidyr"))tidyr::gather(
  pen.peaks.wide,
  "penalty",
  "peaks",
  -1,
  convert=TRUE)
str(pen.peaks.gather)
# nc converts via the type function given alongside the pattern.
pen.peaks.nc <- nc::capture_melt_single(
  pen.peaks.wide,
  penalty="^[0-9.]+", as.numeric,
  value.name="peaks")
str(pen.peaks.nc)
# pivot_longer with an explicit names_transform.
pen.peaks.pivot <- if(requireNamespace("tidyr"))try(tidyr::pivot_longer(
  pen.peaks.wide,
  -1,
  names_to="penalty",
  names_transform=list(penalty=as.numeric),
  values_to="peaks"))
str(pen.peaks.pivot)
varying <- 2:3
# stats::reshape, passing the converted names through `times`.
pen.peaks.reshape.times <- stats::reshape(
  pen.peaks.wide,
  direction="long",
  varying=varying,
  times=as.numeric(names(pen.peaks.wide)[varying]),
  v.names="peaks",
  timevar="penalty")
str(pen.peaks.reshape.times)
# stats::reshape again, after renaming the numeric columns to peaks_<value>
# so the sep="_" convention applies.
pen.peaks.renamed <- pen.peaks.wide
names(pen.peaks.renamed) <- paste0(ifelse(
  grepl("^[0-9]", names(pen.peaks.wide)),
  "peaks_", ""),
  names(pen.peaks.wide))
pen.peaks.reshape.sep <- stats::reshape(
  pen.peaks.renamed,
  direction="long",
  varying=varying,
  sep="_",
  timevar="penalty")
str(pen.peaks.reshape.sep)
library(httr)
# knitr chunk defaults for this vignette.
# NOTE(review): the original read `comment = "` with an unterminated string
# literal -- the conventional "#>" output prefix was evidently truncated
# when the code was extracted; restored here.
knitr::opts_chunk$set(comment = "#>")
library(httr)
# httr API-wrapper vignette: github_api() is redefined repeatedly, each
# version adding one refinement (content-type check, parsing, S3 class,
# error handling, user agent).  The redefinitions are intentional.

# v1: bare GET against the GitHub API.
github_api <- function(path) {
  url <- modify_url("https://api.github.com", path = path)
  GET(url)
}
resp <- github_api("/repos/hadley/httr")
resp
# The same endpoint can serve different content types.
GET("http://www.colourlovers.com/api/color/6B4106?format=xml")
GET("http://www.colourlovers.com/api/color/6B4106?format=json")
http_type(resp)
# v2: fail fast when the server does not return JSON.
github_api <- function(path) {
  url <- modify_url("https://api.github.com", path = path)
  resp <- GET(url)
  if (http_type(resp) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  resp
}
# v3: parse the JSON body explicitly (simplifyVector = FALSE keeps lists).
github_api <- function(path) {
  url <- modify_url("https://api.github.com", path = path)
  resp <- GET(url)
  if (http_type(resp) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  jsonlite::fromJSON(content(resp, "text"), simplifyVector = FALSE)
}
# v4: wrap the parsed content, request path, and raw response in an S3 object.
github_api <- function(path) {
  url <- modify_url("https://api.github.com", path = path)
  resp <- GET(url)
  if (http_type(resp) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  parsed <- jsonlite::fromJSON(content(resp, "text"), simplifyVector = FALSE)
  structure(
    list(
      content = parsed,
      path = path,
      response = resp
    ),
    class = "github_api"
  )
}
# Compact print method for the wrapper object.
print.github_api <- function(x, ...) {
  cat("<GitHub ", x$path, ">\n", sep = "")
  str(x$content)
  invisible(x)
}
github_api("/users/hadley")
# v5: turn HTTP errors into R errors carrying the API's message.
github_api <- function(path) {
  url <- modify_url("https://api.github.com", path = path)
  resp <- GET(url)
  if (http_type(resp) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  parsed <- jsonlite::fromJSON(content(resp, "text"), simplifyVector = FALSE)
  if (http_error(resp)) {
    stop(
      sprintf(
        "GitHub API request failed [%s]\n%s\n<%s>",
        status_code(resp),
        parsed$message,
        parsed$documentation_url
      ),
      call. = FALSE
    )
  }
  structure(
    list(
      content = parsed,
      path = path,
      response = resp
    ),
    class = "github_api"
  )
}
# This path is intentionally wrong (/user/hadley) to demonstrate the error.
github_api("/user/hadley")
# Identify the client to the API with a user agent.
ua <- user_agent("http://github.com/hadley/httr")
ua
# v6: final version -- sends the user agent; checks for status 200 explicitly.
github_api <- function(path) {
  url <- modify_url("https://api.github.com", path = path)
  resp <- GET(url, ua)
  if (http_type(resp) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  parsed <- jsonlite::fromJSON(content(resp, "text"), simplifyVector = FALSE)
  if (status_code(resp) != 200) {
    stop(
      sprintf(
        "GitHub API request failed [%s]\n%s\n<%s>",
        status_code(resp),
        parsed$message,
        parsed$documentation_url
      ),
      call. = FALSE
    )
  }
  structure(
    list(
      content = parsed,
      path = path,
      response = resp
    ),
    class = "github_api"
  )
}
# Side note: match.arg() allows unambiguous partial matches ("a" -> "apple").
f <- function(x = c("apple", "banana", "orange")) {
  match.arg(x)
}
f("a")
# Read the personal access token from the environment, with a clear error
# when it is unset.
github_pat <- function() {
  pat <- Sys.getenv('GITHUB_PAT')
  if (identical(pat, "")) {
    stop("Please set env var GITHUB_PAT to your github personal access token",
      call. = FALSE)
  }
  pat
}
# v1: raw rate-limit response.
rate_limit <- function() {
  github_api("/rate_limit")
}
rate_limit()
# v2: human-readable summary of the core rate limit and its reset time.
rate_limit <- function() {
  req <- github_api("/rate_limit")
  core <- req$content$resources$core
  reset <- as.POSIXct(core$reset, origin = "1970-01-01")
  cat(core$remaining, " / ", core$limit,
    " (Resets at ", strftime(reset, "%H:%M:%S"), ")\n", sep = "")
}
rate_limit()
## dynbenchmark experiment: tune the random-forest hyperparameters used by
## dynfeature feature-importance scoring, trading agreement with the default
## settings (pnorm_cor) against execution time, via mlrMBO Bayesian
## optimisation. Results are cached as .rds files between runs.
library(dynbenchmark)
library(tidyverse)
experiment("02-metrics/optimise_feature_importance")
set.seed(1)
## Step 1: baseline -- repeat the default importance computation 10x on 10
## random datasets to characterise its run-to-run variability (cached).
if (!file.exists(result_file("fimp_orig.rds"))) {
  num_repeats <- 10
  dataset_ids <- list_datasets() %>% sample_n(10) %>% pull(id)
  datasets <- load_datasets(ids = dataset_ids, as_tibble = FALSE) %>% set_names(dataset_ids)
  dataset_metadata <-
    crossing(
      dataset_id = dataset_ids,
      repeat_ix = seq_len(num_repeats)
    ) %>%
    pmap_df(function(dataset_id, repeat_ix) {
      dataset <- datasets[[dataset_id]]
      expr <- get_expression(dataset)
      time0 <- Sys.time()
      importances <- dynfeature::calculate_overall_feature_importance(traj = dataset, expression_source = expr)
      time1 <- Sys.time()
      execution_time <- difftime(time1, time0, units = "secs") %>% as.numeric()
      tibble(
        repeat_ix,
        id = dataset$id,
        nrow = nrow(expr),
        ncol = ncol(expr),
        importances = list(importances),
        execution_time
      )
    })
  write_rds(lst(dataset_ids, dataset_metadata, num_repeats), result_file("fimp_orig.rds"), compress = "xz")
}
# Restore dataset_ids / dataset_metadata / num_repeats into the global env.
list2env(read_rds(result_file("fimp_orig.rds")), .GlobalEnv)
datasets <- load_datasets(ids = dataset_ids, as_tibble = FALSE) %>% set_names(dataset_ids)
## All pairwise importance correlations between two repeat sets; same = FALSE
## drops same-repeat pairs (used when comparing a set against itself).
pairwise_cor_fun <- function(metadata1, metadata2, same = TRUE) {
  cr <- crossing(
    id = dataset_ids,
    left = unique(metadata1$repeat_ix),
    right = unique(metadata2$repeat_ix)
  )
  if (!same) {
    cr <- cr %>% filter(left != right)
  }
  cr %>%
    left_join(metadata1 %>% select(left = repeat_ix, id, importances_left = importances), by = c("id", "left")) %>%
    left_join(metadata2 %>% select(right = repeat_ix, id, importances_right = importances), by = c("id", "right")) %>%
    rowwise() %>%
    mutate(cor = dyneval:::.calculate_featureimp_cor(importances_left, importances_right)$featureimp_wcor) %>%
    ungroup() %>%
    select(-importances_left, -importances_right)
}
## Per-dataset distribution (mean/sd) of the baseline's self-correlation,
## used later to normalise new correlations via pnorm().
pairwise_cor_orig_orig <- pairwise_cor_fun(dataset_metadata, dataset_metadata, same = FALSE)
pairwise_cor_dist <-
  pairwise_cor_orig_orig %>%
  group_by(id) %>%
  summarise(mean = mean(cor), sd = sd(cor))
g1 <-
  ggplot(pairwise_cor_orig_orig) +
  geom_density(aes(cor, colour = id), size = 1) +
  theme_bw() +
  scale_colour_brewer(palette = "Set3") +
  labs(title = "Pairwise cor of importance scores, orig vs. orig")
g1
ggsave(result_file("pairwise_cor_orig_orig.pdf"), g1, width = 10, height = 6)
# NOTE(review): data_frame() is deprecated in newer tibble releases.
g2 <-
  crossing(pairwise_cor_dist, data_frame(cor = seq(min(pairwise_cor_orig_orig$cor), 1, by = .001))) %>%
  mutate(dens = dnorm(cor, mean, sd)) %>%
  ggplot() +
  geom_line(aes(cor, dens, colour = id), size = 1) +
  theme_bw() +
  scale_colour_brewer(palette = "Set3") +
  labs(title = "Estimated densities of pairwise cors")
g2
ggsave(result_file("pairwise_cor_orig_orig_dens.pdf"), g2, width = 10, height = 6)
## Step 2: Bayesian optimisation of the ranger hyperparameters.
library(mlrMBO)
num_cores <- 1
num_iters <- 200
# Initial design: random grid sample, or the cached optimisation path.
design <-
  if (!file.exists(result_file("opt_path.rds"))) {
    crossing(
      num_trees = c(500, 1000, 2000),
      num_mtry = c(20, 50, 100),
      num_sample = c(50, 100, 200),
      min_node_size = c(1, 5, 10)
    ) %>%
      sample_n(30)
  } else {
    read_rds(result_file("opt_path.rds"))
  }
# Skip the (expensive) optimisation when a scored design is already cached.
im_lazy_and_just_want_to_run_the_plots_below <-
  TRUE && "score" %in% colnames(design)
if (im_lazy_and_just_want_to_run_the_plots_below) {
  opt_path <- design
} else {
  # Tree counts / mtry / sample size are searched on a log10 scale.
  param_set <- ParamHelpers::makeParamSet(
    ParamHelpers::makeNumericParam(id = "num_trees", lower = log10(100), upper = log10(10000), trafo = function(x) round(10 ^ x)),
    ParamHelpers::makeNumericParam(id = "num_mtry", lower = log10(5), upper = log10(500), trafo = function(x) round(10 ^ x)),
    ParamHelpers::makeNumericParam(id = "num_sample", lower = log10(10), upper = log10(600), trafo = function(x) round(10 ^ x)),
    ParamHelpers::makeIntegerParam(id = "min_node_size", lower = 1L, upper = 20L)
  )
  # Map execution time (seconds) to a [0, 1] score; 0 beyond 30 minutes.
  scale_execution_time <- function(x) {
    1 - pmin(x / 1800, 1)
  }
  # Objective: geometric mean of normalised correlation and time score.
  obj_fun <-
    smoof::makeSingleObjectiveFunction(
      name = "TItrain",
      vectorized = FALSE,
      minimize = FALSE,
      has.simple.signature = FALSE,
      par.set = param_set,
      fn = function(x) {
        num_repeats <- 1
        new_metadata <-
          crossing(
            dataset_id = dataset_ids,
            repeat_ix = seq_len(num_repeats)
          ) %>%
          pmap_df(function(dataset_id, repeat_ix) {
            dataset <- datasets[[dataset_id]]
            expr <- get_expression(dataset)
            # NOTE(review): x$splitrule is referenced here but `splitrule`
            # is not part of param_set above, so this is presumably NULL --
            # confirm against the intended ranger parameters.
            method_params <- list(
              num.trees = x$num_trees,
              mtry = min(x$num_mtry, ncol(expr)),
              sample.fraction = min(x$num_sample / nrow(expr), 1),
              min.node.size = x$min_node_size,
              splitrule = x$splitrule,
              write.forest = FALSE
            )
            time0 <- Sys.time()
            importances <- dynfeature::calculate_overall_feature_importance(
              traj = dataset,
              expression_source = expr,
              method_params = method_params
            )
            time1 <- Sys.time()
            execution_time <- difftime(time1, time0, units = "secs") %>% as.numeric()
            tibble(
              repeat_ix,
              id = dataset$id,
              importances = list(importances),
              execution_time
            )
          })
        execution_time <-
          new_metadata %>%
          group_by(id) %>%
          summarise(mean = mean(execution_time)) %>%
          summarise(sum = sum(mean)) %>%
          pull(sum)
        # Normalise correlation against the baseline distribution per dataset.
        pnorm_cor <-
          pairwise_cor_fun(dataset_metadata, new_metadata) %>%
          mutate(cor = ifelse(is.finite(cor), cor, 0)) %>%
          left_join(pairwise_cor_dist, by = "id") %>%
          mutate(pnorm_cor = pnorm(cor, mean, sd)) %>%
          summarise(mean = mean(pnorm_cor)) %>%
          pull(mean)
        summary <-
          as_data_frame(x) %>%
          mutate(
            pnorm_cor,
            execution_time,
            execution_time_score = scale_execution_time(execution_time),
            score = dyneval::calculate_geometric_mean(pnorm_cor, execution_time_score)
          )
        score <- summary$score
        # Attach the full summary so it can be recovered from the opt.path.
        attr(score, "extras") <- list(
          .summary = summary
        )
        score
      }
    )
  # Persist MBO progress after every iteration so runs can be resumed.
  progress_file <- derived_file("mlr_progress.RData")
  if (file.exists(progress_file)) file.remove(progress_file)
  control <-
    mlrMBO::makeMBOControl(
      n.objectives = 1,
      propose.points = num_cores,
      save.file.path = progress_file,
      save.on.disk.at = seq(0, num_iters + 1, by = 1),
      y.name = "score"
    ) %>%
    mlrMBO::setMBOControlTermination(iters = num_iters)
  # CB infill for single-objective, DIB for multi-objective runs.
  if (length(control$y.name) == 1) {
    control <- control %>%
      mlrMBO::setMBOControlInfill(mlrMBO::makeMBOInfillCritCB())
  } else {
    control <- control %>%
      mlrMBO::setMBOControlInfill(mlrMBO::makeMBOInfillCritDIB())
  }
  mbo_out <-
    mlrMBO::mbo(
      fun = obj_fun,
      control = control,
      design = design %>%
        select(one_of(c(names(param_set$pars), "score"))) %>%
        mutate_at(c("num_trees", "num_mtry", "num_sample"), log10),
      show.info = TRUE
    )
  # Reload the finalized state from disk (also works after interruption).
  mbo_out <- mboFinalize(progress_file)
  opt_path <-
    bind_rows(
      { if ("score" %in% colnames(design)) design else NULL },
      mbo_out$opt.path$env$extra %>%
        map_df(~ .$.summary)
    )
  write_rds(opt_path, result_file("opt_path.rds"), compress = "xz")
}
## Step 3: diagnostic plots of the optimisation path.
opt_path <- read_rds(result_file("opt_path.rds"))
g1 <-
  ggplot(opt_path, aes(pnorm_cor, execution_time)) +
  geom_point(aes(colour = score), size = 3) +
  theme_bw() +
  viridis::scale_colour_viridis()
ggsave(result_file("result_cor_vs_time.pdf"), g1, width = 6, height = 5)
g2 <-
  ggplot(opt_path %>% gather(parameter, value, num_trees:min_node_size, execution_time)) +
  geom_point(aes(value, pnorm_cor, colour = score), size = 3) +
  theme_bw() +
  viridis::scale_colour_viridis() +
  facet_wrap(~parameter, scales = "free")
ggsave(result_file("result_param_vs_cor.pdf"), g2, width = 12, height = 8)
g1
g2
opt_path %>% arrange(desc(pnorm_cor))
opt_path %>% arrange(desc(score))
# Long format keeping the overall score alongside every parameter/score facet.
g3data <-
  opt_path %>%
  mutate(iteration = seq_len(n()), score_ = score) %>%
  gather(parameter, value, num_trees:min_node_size, execution_time, pnorm_cor, score) %>%
  mutate(param_label = paste0(ifelse(parameter %in% c("pnorm_cor", "execution_time", "score"), "SCORE", "PARAM"), ": ", parameter)) %>%
  rename(score = score_)
g3 <-
  ggplot(g3data) +
  geom_point(aes(score, value, colour = iteration), size = 2) +
  viridis::scale_colour_viridis() +
  facet_wrap(~param_label, scales = "free", dir = "v", nrow = 1) +
  theme_bw()
g4 <-
  ggplot(g3data) +
  geom_point(aes(iteration, value, colour = score), size = 2) +
  viridis::scale_colour_viridis() +
  facet_wrap(~param_label, scales = "free", dir = "v", nrow = 1) +
  theme_bw()
ggsave(result_file("result_compare_params_and_scores.pdf"), patchwork::wrap_plots(g3, g4, ncol = 1), width = 20, height = 6)
## Step 4: evaluate a few hand-picked candidate parameter sets with repeats.
many_params <- list(
  list(name = "local", num_trees = 1900, num_mtry = 44, num_sample = 266, min_node_size = 19),
  list(name = "qsub1", num_trees = 1400, num_mtry = 42, num_sample = 142, min_node_size = 8),
  list(name = "qsub2", num_trees = 3400, num_mtry = 50, num_sample = 92, min_node_size = 19),
  list(name = "qsub3", num_trees = 2000, num_mtry = 42, num_sample = 175, min_node_size = 11),
  list(name = "manual", num_trees = 2000, num_mtry = 50, num_sample = 250, min_node_size = 20)
)
out <- map(many_params, function(params) {
  scale_execution_time <- function(x) {
    1 - pmin(x / 1800, 1)
  }
  cat(params$name, "\n", sep = "")
  new_metadata <-
    crossing(
      dataset_id = dataset_ids,
      repeat_ix = seq_len(num_repeats)
    ) %>%
    pmap_df(function(dataset_id, repeat_ix) {
      dataset <- datasets[[dataset_id]]
      expr <- get_expression(dataset)
      method_params <- list(
        num.trees = params$num_trees,
        mtry = min(params$num_mtry, ncol(expr)),
        sample.fraction = min(params$num_sample / nrow(expr), 1),
        min.node.size = params$min_node_size,
        write.forest = FALSE
      )
      time0 <- Sys.time()
      importances <- dynfeature::calculate_overall_feature_importance(
        traj = dataset,
        expression_source = expr,
        method_params = method_params
      )
      time1 <- Sys.time()
      execution_time <- difftime(time1, time0, units = "secs") %>% as.numeric()
      tibble(
        repeat_ix,
        id = dataset$id,
        importances = list(importances),
        execution_time
      )
    })
  execution_time <-
    new_metadata %>%
    group_by(id) %>%
    summarise(mean = mean(execution_time)) %>%
    summarise(sum = sum(mean)) %>%
    pull(sum)
  # Self-consistency of the candidate settings (new vs. new repeats).
  pnorm_cor <-
    pairwise_cor_fun(new_metadata, new_metadata) %>%
    mutate(cor = ifelse(is.finite(cor), cor, 0)) %>%
    left_join(pairwise_cor_dist, by = "id") %>%
    mutate(pnorm_cor = pnorm(cor, mean, sd)) %>%
    summarise(mean = mean(pnorm_cor)) %>%
    pull(mean)
  summary <-
    as_data_frame(params) %>%
    mutate(
      name = params$name,
      pnorm_cor,
      execution_time,
      execution_time_score = scale_execution_time(execution_time),
      score = dyneval::calculate_geometric_mean(pnorm_cor, execution_time_score)
    )
  lst(params, new_metadata, summary)
})
summary <- map_df(out, ~ .$summary)
histogram.btmix <- function(x, data, root = TRUE, ...) {
  ## S3 histogram method for "btmix" objects. The 'data' argument exists only
  ## for generic-signature compatibility and is never used; plotting is
  ## delegated to the S4 plot method registered for class "flexmix".
  if (!missing(data)) {
    warning("argument 'data' is ignored")
  }
  flexmix_plot <- getMethod("plot", c(x = "flexmix", y = "missing"))
  flexmix_plot(as(x, "flexmix"), root = root, ...)
}
## S3 xyplot method for "btmix" objects: lattice plot of the per-component
## worth parameters against the ranked objects, either one panel per mixture
## component ("multiple") or all components overlaid ("single").
xyplot.btmix <- function(x, data,
  component = NULL, plot.type = c("multiple", "single"),
  auto.key = NULL, type = "b", lty = NULL, xlab = "Objects", ylab = "Worth parameters",
  panel = NULL, scales = NULL, ...)
{
  # worth(x): items x components matrix of worth parameters.
  y <- worth(x)
  if(!missing(data)) warning("'data' argument is ignored")
  nitem <- nrow(y)
  # Reference line height: mean worth of the first component.
  ymean <- mean(y[, 1L])
  lab <- labels(x)
  if(is.null(component)) component <- 1:NCOL(y)
  y <- y[, component, drop = FALSE]
  # Long-format data for lattice: value, item index, component factor.
  d <- data.frame(
    y = as.vector(y),
    x = rep(1:nitem, length(component)),
    z = factor(rep(component, each = nitem),
      levels = component, labels = paste("Comp.", component))
  )
  plot.type <- match.arg(plot.type)
  if(plot.type == "single") {
    # One panel, components distinguished by group/line type.
    f <- y ~ x
    groups <- ~ z
    if(is.null(lty)) lty <- trellis.par.get("superpose.line")$lty
  } else {
    # One panel per component.
    f <- y ~ x | z
    groups <- NULL
    if(is.null(lty)) lty <- trellis.par.get("superpose.line")$lty[2]
  }
  # Legend only makes sense when components share a panel.
  if(is.null(auto.key)) auto.key <- plot.type == "single"
  if(is.null(scales)) scales <- list(x = list(at = 1:nitem, alternating = 1, labels = lab))
  # Default panel: points/lines plus a horizontal reference at ymean.
  if(is.null(panel)) panel <- function(x, y, ...) {
    panel.xyplot(x, y, ...)
    panel.abline(h = ymean, reference = TRUE)
  }
  xyplot(f, groups = groups, data = d,
    type = type, lty = lty, xlab = xlab, ylab = ylab,
    auto.key = auto.key, scales = scales, panel = panel, ...)
}
find_markers <- function(Y, references = NULL, pure_samples = NULL, data_type = NULL,
    gamma = NULL, marker_method = "ratio") {
    ## Determine marker genes for each cell type from reference expression.
    ## Y: samples x genes expression matrix (log2 scale); references /
    ## pure_samples identify the pure reference samples (merged by
    ## combine_Y_refs()). marker_method: "ratio", "regression", "diff" or
    ## "p.value". Returns list(L = marker gene indices per type,
    ## V = marker values per type, M = per-gene table, sM = sorted table).
    cmbd <- combine_Y_refs(Y, references, pure_samples)
    Y <- cmbd$Y
    pure_samples <- cmbd$pure_samples
    # p.value method needs replicates per type; otherwise fall back to "diff".
    # (scalar condition, so use && rather than the vectorized &)
    if (any(lengths(pure_samples) == 1) && marker_method == "p.value") {
        message("Can't use p.value method.")
        marker_method <- "diff"
    }
    if (is.null(gamma))
        gamma <- get_gamma(data_type)
    K <- length(pure_samples)
    N <- dim(Y)[2]
    pure <- unlist(pure_samples)
    # C: K x N score matrix, one row per cell type, one column per gene.
    C <- array(0, c(K, N))
    colnames(C) <- colnames(Y)
    if (marker_method == "ratio") {
        # Mean linear-scale expression per type, divided by the summed
        # expression over all other types.
        avg_exp_fn <- function(x) {
            colMeans(2^(Y[x, , drop = FALSE]))/gamma
        }
        eta_hats <- t(sapply(pure_samples, avg_exp_fn))
        C <- t(sapply(1:K, function(i) {
            eta_hats[i, ]/apply(eta_hats[-i, , drop = FALSE], 2, sum)
        }))
    } else if (marker_method == "regression") {
        # Slope of expression on an indicator for membership in type i.
        for (i in 1:K) {
            X <- as.numeric(pure %in% pure_samples[[i]])
            Yp <- as.matrix(Y[pure, ])
            m <- stats::lm(Yp ~ 1 + X)
            cfdf <- data.frame(t(stats::coef(m)))
            C[i, ] <- cfdf$X
        }
    } else if (marker_method == "diff") {
        # Median expression per type, minus the second-highest type's median.
        for (i in 1:K) {
            C[i, ] <- apply(Y[pure_samples[[i]], , drop = FALSE], 2, stats::median)
        }
        less_second <- function(x) {
            x - sort(x, decreasing = TRUE)[2]
        }
        C <- apply(C, 2, less_second)
    } else if (marker_method == "p.value") {
        for (i in 1:K) {
            C[i, ] <- apply(Y[pure_samples[[i]], , drop = FALSE], 2, mean)
        }
        # Two-sample t statistic of each type against the runner-up type.
        calc_pvals <- function(i) {
            x <- C[, i]
            top <- which(x == max(x))[1]
            second <- order(x, decreasing = TRUE)[2]
            pvs <- rep(NA, length(x))
            for (j in 1:length(x)) {
                # tryCatch here has no handler, so errors still propagate;
                # kept for compatibility with the original control flow.
                pvs[j] <- tryCatch({
                  x1 <- Y[pure_samples[[j]], i]
                  x2 <- Y[pure_samples[[second]], i]
                  n1 <- length(x1)
                  n2 <- length(x2)
                  sd1 <- stats::sd(x1)
                  sd2 <- stats::sd(x2)
                  # BUG FIX: the pooled standard deviation must pool the
                  # *variances* (sd^2); the previous code pooled the SDs
                  # themselves, giving a wrong t statistic.
                  sp <- sqrt(((n1 - 1) * sd1^2 + (n2 - 1) * sd2^2)/(n1 + n2 - 2))
                  t.value <- (mean(x1) - mean(x2))/(sp * sqrt((1/n1) + (1/n2)))
                  # Monotone in |t|, used only for ranking below.
                  tmp <- stats::pt(abs(t.value), df = n1 + n2 - 2)
                  tmp
                })
            }
            # Only the top type keeps its score; all others are zeroed.
            pvs[-top] <- 0
            return(pvs)
        }
        C <- sapply(1:ncol(C), calc_pvals)
    } else {
        stop("Marker method not found.")
    }
    # For each gene, pick the single best-scoring type; ties become NA.
    pick_top <- function(x) {
        m <- which(x == max(x, na.rm = TRUE))
        if (length(m) != 1)
            return(c(NA, NaN))
        return(c(m, x[m]))
    }
    M <- apply(C, 2, pick_top)
    M <- data.frame(t(M))
    colnames(M) <- c("top", "value")
    M$rn <- 1:N
    rownames(M) <- colnames(Y)
    M$Cell.Type <- names(pure_samples)[M$top]
    if (marker_method == "p.value") {
        # Tie-break equal p-value scores with the "diff" score.
        diffmm <- find_markers(Y = Y, pure_samples = pure_samples, data_type = data_type,
            gamma = gamma, marker_method = "diff")$M
        M$diff <- diffmm$value
        iM <- M[stats::complete.cases(M), ]
        sM <- iM[order(iM$top, -iM$value, -iM$diff), ]
    } else {
        iM <- M[stats::complete.cases(M), ]
        sM <- iM[order(iM$top, -iM$value), ]
    }
    # Marker gene indices (L) and scores (V) per cell type, best first.
    L <- lapply(1:K, function(i) {
        vals <- sM[sM$top == i, "rn"]
        names(vals) <- rownames(sM[sM$top == i, ])
        return(vals)
    })
    V <- lapply(1:K, function(i) {
        vals <- sM[sM$top == i, "value"]
        names(vals) <- rownames(sM[sM$top == i, ])
        return(vals)
    })
    names(L) <- names(pure_samples)
    names(V) <- names(pure_samples)
    return(list(L = L, V = V, M = M, sM = sM))
}
# Bare string literal -- likely a dataset name evaluated for its value
# (e.g. by a data documentation block); kept as-is.
"scurvy"
# Load the tab-separated metagenomics abundance table from the local data dir.
metagenomics_abundance = utils::read.table("./data/metagenomics_abundance.txt", sep = '\t')
.kcheck <- function(b) {
  ## Summarise basis dimension (k') and effective degrees of freedom (edf)
  ## for every smooth term of a fitted gam-like object `b` (expects
  ## b$smooth, a list of smooths with $label/$first.para/$last.para, and
  ## b$edf, the per-coefficient edf vector).
  ## Returns a matrix with one row per smooth, or NULL if there are none.
  m <- length(b$smooth)
  if (m == 0) return(NULL)
  kc <- edf <- rep(0, m)
  snames <- rep("", m)
  # Dead code removed: the original also set an unused `ok` flag, overwrote
  # the local copy's $by slot, and computed an unused nrow(b$model).
  for (k in 1:m) {
    smooth_k <- b$smooth[[k]]
    snames[k] <- smooth_k$label
    # Coefficient indices belonging to this smooth term.
    ind <- smooth_k$first.para:smooth_k$last.para
    kc[k] <- length(ind)       # k' = number of coefficients
    edf[k] <- sum(b$edf[ind])  # effective degrees of freedom
  }
  k.table <- cbind(kc, edf)
  dimnames(k.table) <- list(snames, c("k\'", "edf"))
  k.table
}
## Functional regression demo on the tecator data: fit fat content on the
## second derivative of the absorbance curves with refund::pfr()/af(),
## comparing Qtransform = FALSE vs TRUE, then plot all four variants.
library(refund) ; library(fda)
library(fda.usc) ; data(tecator)
y <- tecator$y$Fat ; X <- tecator$absorp.fdata$data
wavelength <- seq(850,1050,length = 100)
# Smooth the raw absorbance curves with a B-spline basis, then evaluate
# first and second derivatives on the wavelength grid.
bbt <- create.bspline.basis(range(wavelength), nbasis=20)
Xfd <- smooth.basisPar(wavelength,t(X),bbt,2,1e-9)
Xderiv <- t(eval.fd(wavelength,Xfd$fd,1))
Xderiv2 <- t(eval.fd(wavelength,Xfd$fd,2))
# Additive functional model without quantile transformation of the covariate.
fitPFRnoTrans <- pfr(y ~ af(Xderiv2,argvals=wavelength,
                   bs = "ps",k = c(7,7),m = list(c(2,2),c(2,2)),
                   Qtransform = FALSE), method = "REML")
# Same model with the covariate quantile-transformed (Qtransform = TRUE).
fitPFRtrans <- pfr(y ~ af(Xderiv2,argvals = wavelength,
                 bs = "ps",k = c(7,7),m = list(c(2,2),c(2,2)),
                 Qtransform = TRUE),method = "REML")
# 2x2 panel: each fit with and without the data rug.
par(mfrow=c(2,2),mai=c(0.8,0.7,0.3,0.15))
plot(fitPFRnoTrans,scheme = 2,
   main = "Qtransform = FALSE,rug = FALSE",
   xlab = "t",ylab = "x",bty = "l",cex.lab = 1.5,
   cex.axis = 1.5,cex.main = 1.6,col.main = "navy",
   rug = FALSE)
plot(fitPFRnoTrans,scheme = 2,
   main = "Qtransform = FALSE,rug = TRUE",
   xlab = "t",ylab = "x",bty = "l",
   cex.lab = 1.5,cex.axis = 1.5,cex.main = 1.6,
   col.main = "navy",rug = TRUE)
plot(fitPFRtrans,scheme = 2,
   main = "Qtransform = TRUE,rug = FALSE",
   xlab = "t",ylab = "x",bty = "l",
   cex.lab = 1.5,cex.axis = 1.5,cex.main = 1.6,
   col.main = "navy",rug = FALSE,Qtransform = TRUE)
plot(fitPFRtrans,scheme = 2,
   main = "Qtransform = TRUE,rug = TRUE",
   xlab = "t",ylab = "x",bty = "l",
   cex.lab = 1.5,cex.axis = 1.5,cex.main = 1.6,
   col.main = "navy",rug = TRUE,Qtransform = TRUE)
## Rebalance shard leaders of a Solr collection: thin wrapper that forwards
## all arguments to the identically named method on the connection object.
collection_rebalanceleaders <- function(conn, name, maxAtOnce = NULL,
  maxWaitSeconds = NULL, raw = FALSE, ...) {
  conn$collection_rebalanceleaders(name, maxAtOnce, maxWaitSeconds, raw, ...)
}
getDistribTaxa <- function(taxIDs,
                           climate = NA,
                           xmn = NA, xmx = NA, ymn = NA, ymx = NA,
                           continents = NA, countries = NA,
                           basins = NA, sectors = NA,
                           realms = NA, biomes = NA, ecoregions = NA,
                           elev_min = NA, elev_max = NA, elev_range = NA,
                           year_min = 1900, year_max = 2021, nodate = TRUE,
                           type_of_obs = c(1, 2, 3, 8, 9),
                           dbname = "gbif4crest_02") {
    ## Retrieve occurrence locations (and associated climate values) for the
    ## given taxon IDs from the gbif4crest database, optionally filtered by
    ## bounding box, geopolitical units, marine basins, WWF ecoregions,
    ## elevation, observation date and observation type. Returns a data frame
    ## with columns taxonid, longitude, latitude and the climate variables.
    # Forces evaluation of a missing argument so R raises the standard
    # 'argument "taxIDs" is missing' error.
    if(base::missing(taxIDs)) taxIDs
    coords <- check_coordinates(xmn, xmx, ymn, ymx)
    # Terrestrial geopolitical filter (continents and/or countries).
    if (is.na(continents)[1] & is.na(countries)[1]) {
        GEO_terr <- ""
    } else {
        GEO_terr <- paste0(
            "AND countryID IN ",
            "   (SELECT distinct geopoID ",
            "      FROM geopolitical_units ",
            "     WHERE ",
            ifelse(is.na(continents)[1], "", paste0("continent IN ('", paste(continents, collapse = "', '"), "') ")),
            ifelse(is.na(continents)[1] | is.na(countries)[1], "", "AND "),
            ifelse(is.na(countries)[1], "", paste0("name IN ('", paste(countries, collapse = "', '"), "') ")),
            "   ) "
        )
    }
    # Marine filter (ocean basins and/or sectors).
    if (is.na(basins)[1] & is.na(sectors)[1]) {
        GEO_mari <- ""
    } else {
        GEO_mari <- paste0(
            "AND oceanID IN ",
            "   (SELECT distinct geopoID ",
            "      FROM geopolitical_units ",
            "     WHERE ",
            ifelse(is.na(basins)[1], "", paste0("basin IN ('", paste(basins, collapse = "', '"), "') ")),
            ifelse(is.na(basins)[1] | is.na(sectors)[1], "", "AND "),
            ifelse(is.na(sectors)[1], "", paste0("name IN ('", paste(sectors, collapse = "', '"), "') ")),
            "   ) "
        )
    }
    # WWF biogeography filter (realms / biomes / ecoregions).
    if (is.na(realms)[1] & is.na(biomes)[1] & is.na(ecoregions)[1]) {
        WWF <- ""
    } else {
        WWF <- paste0(
            "AND terr_ecoID IN ",
            "   (SELECT distinct ecoID ",
            "      FROM biogeography ",
            "     WHERE ",
            ifelse(is.na(realms)[1], "", paste0("realm IN ('", paste(realms, collapse = "', '"), "') ")),
            ifelse(is.na(realms)[1] | is.na(biomes)[1], "", "AND "),
            ifelse(is.na(biomes)[1], "", paste0("biome IN ('", paste(biomes, collapse = "', '"), "') ")),
            ifelse(is.na(biomes)[1] | is.na(ecoregions)[1], ifelse(is.na(realms)[1] | is.na(ecoregions)[1], "", "AND "), "AND "),
            ifelse(is.na(ecoregions)[1], "", paste0("ecoregion IN ('", paste(ecoregions, collapse = "', '"), "') ")),
            "   ) "
        )
    }
    # Default to all available climate variables when none are requested.
    if (unique(is.na(climate))) {
        CLIM3 <- paste(', ', paste(accClimateVariables()[,2], collapse = ", "))
        climate <- accClimateVariables()[,2]
    } else {
        CLIM3 <- paste(', ', paste(climate, collapse = ", "))
    }
    # The bundled example DB has no date/elevation/observation-type columns.
    if(dbname == 'crest_example') {
        DATE <- ''
        ELEVMIN <- ELEVMAX <- ELEVRANGE <- ''
        TYPEOFOBS <- ''
    } else {
        DATEMIN <- ifelse(is.na(year_min), '', paste0(" AND last_occ >= ", year_min))
        DATEMAX <- ifelse(is.na(year_max), '', paste0(" AND first_occ <=", year_max))
        NODATE <- ifelse(is.na(nodate), '', paste0(" no_date = ", nodate))
        DATE <- paste0(DATEMIN, DATEMAX)
        # Strip the leading " AND" and wrap the date constraints in parens.
        if(nchar(DATE) > 0) DATE <- paste0('( ', substr(DATE, 5, nchar(DATE)), ') ')
        if(nchar(NODATE) > 0) {
            if(nchar(DATE) == 0) {
                DATE <- paste0('AND ', NODATE)
            } else {
                DATE <- paste0('AND ( ', DATE, ' OR ', NODATE, ')')
            }
        } else if (nchar(DATE) > 0) {
            # BUG FIX: only prepend "AND" when a date constraint exists.
            # Previously this branch ran unconditionally, producing a
            # dangling "AND " (invalid SQL) when year_min, year_max and
            # nodate were all NA.
            DATE <- paste0('AND ', DATE)
        }
        ELEVMIN <- ifelse(is.na(elev_min), '', paste0(' AND elevation >= ', elev_min))
        ELEVMAX <- ifelse(is.na(elev_max), '', paste0(' AND elevation <= ', elev_max))
        ELEVRANGE <- ifelse(is.na(elev_range), '', paste0(' AND elev_range <= ', elev_range))
        # Observation-type filter: OR together the selected boolean columns.
        TYPEOFOBS <- ''
        if(!unique(is.na(type_of_obs))) {
            res <- dbRequest("SELECT * FROM typeofobservations ORDER BY type_of_obs", dbname)
            for(i in type_of_obs) {
                TYPEOFOBS <- paste(TYPEOFOBS, 'OR ', base::trimws(res[i,2]), '= TRUE ')
            }
            TYPEOFOBS <- paste('AND (', substr(TYPEOFOBS, 4, nchar(TYPEOFOBS)), ')')
        }
    }
    # First query: which locations hold the requested taxa (date/type filters).
    req <- paste0(
        "  SELECT DISTINCT taxonid, locid ",
        "    FROM distrib_qdgc ",
        "   WHERE taxonID IN (", paste(taxIDs, collapse = ", "), ")",
        "     ", DATE, ' ',
        "     ", TYPEOFOBS, ' '
    )
    res <- dbRequest(req, dbname)
    # No occurrences: return an empty data frame with the expected columns.
    if(nrow(res) == 0) return(stats::setNames(data.frame(matrix(ncol = length(c('taxonid', 'longitude', 'latitude', climate)), nrow = 0)), c('taxonid', 'longitude', 'latitude', climate)))
    # Second query: coordinates + climate for those locations, applying the
    # spatial, elevation and biogeographical filters.
    req2 <- paste0(
        "  SELECT DISTINCT locid, longitude, latitude", CLIM3,
        "    FROM data_qdgc ",
        "   WHERE locid IN (", paste(unique(res[, 2]), collapse = ", "), ")",
        "     AND longitude >= ", coords[1], " AND longitude <= ", coords[2], " ",
        "     AND latitude >= ", coords[3], " AND latitude <= ", coords[4], " ",
        "     ", ELEVMIN, ' ',
        "     ", ELEVMAX, ' ',
        "     ", ELEVRANGE, ' ',
        "     ", GEO_terr, " ",
        "     ", GEO_mari, " ",
        "     ", WWF, " "
    )
    res2 <- dbRequest(req2, dbname)
    res <- merge(res, res2, by='locid')
    res[, c('taxonid', 'longitude', 'latitude', climate)]
}
## Recursively partition individuals into groups by hierarchical clustering
## of an opposing-homozygote (oh) matrix, writing group assignments to a
## temporary file "temp.txt" in the working directory and reading them back.
.prCalus <- function(oh, genotype)
{
    # Minor allele frequency of one SNP column coded 0/1/2.
    # NOTE(review): maf_geno is defined here but never used -- the apply()
    # below calls an external .maf instead. Confirm this is intentional.
    maf_geno <- function(x)
    {
        z <- length(which(x == 0))
        o <- length(which(x == 1))
        maf <- (z * 2 + o)/(sum(!is.na(x)) * 2)
        if (!is.na(maf))
            if (maf > 0.5)
                maf <- 1 - maf
        maf
    }
    p <- apply(genotype, 2, .maf)
    p <- p[!is.na(p)]
    q <- 1 - p
    # Threshold for the maximum expected oh count within a family, reduced
    # by 10% as a safety margin.
    maxsnpnooh <- (sum(p^2 * q^2) + sum(2 * (p^2 * q^2)))/2
    maxsnpnooh <- maxsnpnooh - (.1 * maxsnpnooh)
    cat("id group \n", file = "temp.txt")
    # Recursive helper: split by Ward clustering into 2 groups; recurse on a
    # group while its internal max oh exceeds the threshold, otherwise emit
    # its members with a random group label.
    rhsr_rc <- function(oh, maxsnpnooh = maxsnpnooh)
    {
        print("----")
        d <- as.dist(.fastdist(oh))
        if (length(d) > 2)
        {
            # NOTE(review): method = "ward" is deprecated in stats::hclust
            # (use "ward.D"/"ward.D2"); confirm the intended criterion.
            fit <- hclust(d, method = "ward")
            groups <- cutree(fit, k = 2)
            a <- which(groups == 1)
            b <- which(groups == 2)
            if (length(a) > 2)
            {
                subohA <- oh[a, a]
                maxSubohA <- max(subohA[lower.tri(subohA)])
            } else
            {
                maxSubohA <- 0
            }
            if (length(b) > 2)
            {
                subohB <- oh[b, b]
                maxSubohB <- max(subohB[lower.tri(subohB)])
            } else
            {
                maxSubohB <- 0
            }
            if (maxSubohA > maxsnpnooh && length(a) > 2)
            {
                rhsr_rc(oh[a, a], maxsnpnooh)
            } else
            {
                # Random integer label identifies this group in the file.
                write.table(data.frame(names(a), round(abs(rnorm(1) * 10^5))), "temp.txt", append = TRUE, col.names = FALSE, row.names = FALSE)
            }
            if (maxSubohB > maxsnpnooh && length(b) > 2)
            {
                rhsr_rc(oh[b, b], maxsnpnooh)
            } else
            {
                write.table(data.frame(names(b), round(abs(rnorm(1) * 10^6))), "temp.txt", append = TRUE, col.names = FALSE, row.names = FALSE)
            }
        } else
        {
            if (!is.integer(oh))
                write.table(data.frame(rownames(oh), round(abs(rnorm(1) * 10^6))), "temp.txt", append = TRUE, col.names = FALSE, row.names = FALSE)
        }
    }
    result <- rhsr_rc(oh, maxsnpnooh)
    # The real result is accumulated in the temp file, not the return value.
    result <- read.table("temp.txt", header = TRUE)
    file.remove("temp.txt")
    result
}
## Compute the mean per-herd sample size (individual sampling) required to
## reach a given herd sensitivity, using a lookup table of optimal sample
## sizes per herd-size interval; optionally stratified by groupVec.
computeSampleSizeInd <- function(survey.Data, herdSensitivity, groupVec = NULL){
    # Lookup table: optimal sample size per population-size interval.
    lookupTable <- computeOptimalSampleSize(nPopulation = max(survey.Data@nAnimalVec),
        prevalence = survey.Data@intraHerdPrevalence, alpha = (1-herdSensitivity),
        sensitivity = survey.Data@diagSensitivity, specificity = 1, lookupTable = TRUE)
    nAnimalLookup <- as.data.frame(lookupTable)
    # Interval labels of the form "(lower-1, upper]" matching cut() output.
    nAnimalLookup$interval <- paste("(", nAnimalLookup$N_lower-1, ",",
        nAnimalLookup$N_upper, "]", sep = "")
    breaks <- c(nAnimalLookup$N_lower[1]-1, nAnimalLookup$N_upper)
    if (is.null(groupVec)){
        # Ungrouped: tabulate herd sizes, map each to its interval's sample
        # size, and average over all herds.
        nAnimalTable <- table(survey.Data@nAnimalVec)
        nAnimalDataFrame <- data.frame(nAnimal = as.numeric(as.character(names(nAnimalTable))),
            freq = as.vector(nAnimalTable), interval = cut(x = as.numeric(as.character(names(nAnimalTable))),
            breaks = breaks, dig.lab = 10))
        nAnimalDataFrame <- merge(x = nAnimalDataFrame,
            y = subset(nAnimalLookup, select = c("interval", "sampleSize")),
            by = "interval", all.x = TRUE, all.y = FALSE)
        nAnimalsMeanPerHerd <- sum(with(nAnimalDataFrame, freq*sampleSize))/length(survey.Data@nAnimalVec)
    } else {
        # Grouped: same computation within each stratum of groupVec.
        splitList <- split(x = survey.Data@nAnimalVec, f = groupVec)
        nAnimalsMeanPerHerdList <- lapply(splitList, function(nAnimalVec){
            nAnimalTable <- table(nAnimalVec)
            nAnimalDataFrame <- data.frame(nAnimal = as.numeric(as.character(names(nAnimalTable))),
                freq = as.vector(nAnimalTable),
                interval = cut(x = as.numeric(as.character(names(nAnimalTable))),
                breaks = breaks, dig.lab = 10))
            nAnimalDataFrame <- merge(x = nAnimalDataFrame,
                y = subset(nAnimalLookup, select = c("interval", "sampleSize")),
                by = "interval", all.x = TRUE, all.y = FALSE)
            nAnimalsMeanPerHerd <- sum(with(nAnimalDataFrame, freq*sampleSize))/length(nAnimalVec)
        })
        # Flatten the per-group means into a named vector.
        nAnimalsMeanPerHerd <- Reduce(function(x,y) c(x,y), nAnimalsMeanPerHerdList)
        names(nAnimalsMeanPerHerd) <- names(nAnimalsMeanPerHerdList)
    }
    return(list(lookupTable = lookupTable, nAnimalsMeanPerHerd = nAnimalsMeanPerHerd))
}
## Tests for dauphin's internal digit helpers: grepl_digit should agree with
## base grepl("[0-9]", .), and gsub_09 should extract leading digit runs as
## integers (ignoring spaces, NA-preserving).
s <- c("AB", "abc ", "", NA, "90", "ABC090")
expect_equal(dauphin:::grepl_digit(s), grepl("[0-9]", s))
expect_equal(dauphin:::gsub_09("1 234"), 1234)
expect_equal(dauphin:::gsub_09(c("000 2342", NA)), c(2342L, NA))
expect_equal(dauphin:::gsub_09(c("000 2342", "123 abc 234")), c(2342L, 123L))
Apriori_A <- function(dat, NumberofPartitionsforNumericAttributes=4,MinimumSupport=0.1,MinimumConfidence=0.8){
  ## Convenience constructor: builds an RKEEL Apriori_A association-rules
  ## algorithm object and applies the supplied parameters before returning it.
  algorithm <- RKEEL::R6_Apriori_A$new()
  algorithm$setParameters(
    dat,
    NumberofPartitionsforNumericAttributes,
    MinimumSupport,
    MinimumConfidence
  )
  algorithm
}
## R6 wrapper for the KEEL "Apriori_A" association-rules algorithm: stores
## the three tuning parameters and renders them into the parameter file
## format expected by the Java backend (Apriori.jar).
R6_Apriori_A <- R6::R6Class("R6_Apriori_A",
  inherit = AssociationRulesAlgorithm,
  public = list(
    # Algorithm parameters with their KEEL defaults.
    NumberofPartitionsforNumericAttributes=4,
    MinimumSupport=0.1,
    MinimumConfidence=0.8,
    # Store the dataset (via the parent class) and the tuning parameters.
    setParameters = function(dat, NumberofPartitionsforNumericAttributes=4,MinimumSupport=0.1,MinimumConfidence=0.8){
      super$setParameters(dat)
      self$NumberofPartitionsforNumericAttributes <- NumberofPartitionsforNumericAttributes
      self$MinimumSupport <- MinimumSupport
      self$MinimumConfidence <- MinimumConfidence
    }
  ),
  private = list(
    # Backend jar and identifiers used by the RKEEL runner.
    jarName = "Apriori.jar",
    algorithmName = "Apriori_A",
    algorithmString = "Apriori_A",
    algorithmOutputNumTxt = 1,
    # Serialise the parameters in the "key = value" lines KEEL expects.
    getParametersText = function(){
      text <- ""
      text <- paste0(text, "Number of Partitions for Numeric Attributes = ", self$NumberofPartitionsforNumericAttributes, "\n")
      text <- paste0(text, "Minimum Support = ", self$MinimumSupport, "\n")
      text <- paste0(text, "Minimum Confidence = ", self$MinimumConfidence, "\n")
      return(text)
    }
  )
)
NULL
## S4 generic: estimate the wind vector from bird ground-speed observations;
## the matrix/numeric method below does the actual work.
setGeneric("getWindEstimate", function(groundSpeeds, phi, windStart=c(0,0)){
  standardGeneric("getWindEstimate")
})
setMethod("getWindEstimate",signature = signature(groundSpeeds="matrix", phi="numeric",windStart="ANY"), function(groundSpeeds, phi, windStart) {
  ## Estimate the wind vector by minimising the residual airspeed variance
  ## (calcVar, defined elsewhere) over candidate wind vectors, starting from
  ## `windStart`. Returns list(windEst, residualVarAirSpeed, covar) on
  ## convergence, or NULL when the optimizer fails; covar is NULL when the
  ## Hessian is singular.
  stopifnot(length(phi)==1)
  stopifnot(length(windStart)==2)
  # hessian = TRUE (not the reassignable alias T): the Hessian is needed for
  # the covariance estimate below.
  optimResult = optim(windStart, calcVar, hessian = TRUE, phi=phi, groundSpeeds=groundSpeeds)
  n = nrow(groundSpeeds)
  if (optimResult$convergence != 0) {
    return(NULL)  # optimizer did not converge
  }
  # Covariance of the wind estimate from the scaled inverse Hessian.
  if (det(optimResult$hessian) != 0) {
    covar = solve(optimResult$hessian / optimResult$value * n / 2)
  } else {
    covar = NULL
  }
  list(
    windEst = optimResult$par, residualVarAirSpeed = optimResult$value, covar =
      covar
  )
}
)
SimulateGaussianAbundances <- function(nsites = 100, nspecies = 15, dimens = 2, maximum = 10, tol = 1) {
    ## Simulate a sites x species abundance matrix from a Gaussian response
    ## over random site/species configurations, plot the configuration, run
    ## an Unfolding + Procrustes check, and return the abundance matrix.
    # Vectorized label construction. The previous `for (i in 2:n)` loops
    # broke for n == 1 because 2:1 counts downwards.
    SiteNames = paste0("S", seq_len(nsites))
    SpeciesNames = paste0("Sp", seq_len(nspecies))
    # Random site (X) and species (U) coordinates in `dimens` dimensions.
    X = matrix(rnorm(nsites * dimens), nsites, dimens)
    U = matrix(rnorm(nspecies * dimens), nspecies, dimens)
    A = rbind(X, U)
    # Empty frame spanning all points, then overlay sites and species labels.
    plot(A[, 1], A[, 2], cex = 0, asp = 1)
    points(X[, 1], X[, 2], cex = 0.2)
    text(X[, 1], X[, 2], SiteNames)
    # (removed a stray empty argument from the original points() call)
    points(U[, 1], U[, 2], col = "red", pch = 16, cex = 0.2)
    text(U[, 1], U[, 2], SpeciesNames, col = "red")
    # Tolerance circle around each species optimum (Circle() from the package).
    for (i in seq_len(nspecies)) Circle(tol, origin = c(U[i, 1], U[i, 2]), color = "red")
    # Gaussian response: abundance decays with squared unfolding distance.
    AB = round(maximum * exp(-0.5 * (1/tol) * DistUnfold(X, U)^2), digits = 1)
    rownames(AB) = SiteNames
    colnames(AB) = SpeciesNames
    dev.new()
    unf = Unfolding(AB, model = "Ratio", condition=2, weight=2)
    plot(unf, PlotTol = TRUE)  # TRUE rather than the reassignable alias T
    # Procrustes comparison of the recovered vs. true site configuration.
    Pr = SimpleProcrustes(X, unf$X)
    dev.new()
    plot(Pr)
    return(AB)
}
## Reorder the columns of a Q-matrix to best match a target Q-matrix,
## searching all column permutations and scoring each by mean absolute
## difference (MAD) and mean congruence coefficient (CC).
orderQ <- function(Q, target){
  if(!is.matrix(Q) & !is.data.frame(Q)){stop("Error in orderQ: Q must be a matrix or data.frame.")}
  if(!is.matrix(target) & !is.data.frame(target)){stop("Error in orderQ: target must be a matrix or data.frame.")}
  mQ <- Q
  mT <- target
  if(!is.matrix(mQ)){mQ <- as.matrix(mQ)}
  if(!is.matrix(mT)){mT <- as.matrix(mT)}
  K <- ncol(mQ)
  # All K! column orderings, also encoded as e.g. 213 for labeling.
  order.k <- combinat::permn(1:K)
  order.k.col <- as.numeric(sapply(order.k, paste, collapse = ""))
  MAD <- CC <- c()
  for(i in 1:length(order.k)){
    # MAD: mean absolute entry difference; CC: mean per-column congruence.
    tmp.MAD <- round(mean(apply(abs(mT - mQ[,order.k[[i]]]), 2, mean)), 3)
    tmp.CC <- round(mean(diag((t(mT) %*% mQ[,order.k[[i]]]) / (sqrt(colSums(mT^2) %o% colSums(mQ[,order.k[[i]]]^2))))), 3)
    # NOTE(review): NA scores are replaced by the sentinel 999; since the
    # best permutation is chosen with which.max(CC) below, an NA CC (-> 999)
    # would win over valid scores. Confirm this sentinel is intended.
    if(is.na(tmp.MAD)){tmp.MAD <- 999}
    if(is.na(tmp.CC)){tmp.CC <- 999}
    MAD <- c(MAD, tmp.MAD)
    CC <- c(CC, tmp.CC)
    # Stop early on a perfect match.
    if(MAD[i] == 0 | CC[i] == 1){break}
  }
  # Best permutation by congruence; report all evaluated configurations.
  order.Q <- mQ[,order.k[[which.max(CC)]]]
  rownames(order.Q) <- 1:nrow(order.Q)
  configs <- cbind(order = order.k.col[1:length(MAD)], MAD, CC)
  configs <- configs[order(configs[,"order"]),,drop = FALSE]
  rownames(configs) <- 1:nrow(configs)
  spec <- list(Q = Q, target = target)
  res <- list(order.Q = order.Q, configs = configs, specifications = spec)
  class(res) <- "orderQ"
  return(res)
}
## Recorded httr "response" object (HTTP 400 from the Scryfall card-search
## API): a serialized test fixture -- do not edit by hand.
structure(list(url = "https://api.scryfall.com/cards/search?q=mm%3A123&unique=cards&order=name&dir=auto&include_extras=false&include_multilingual=false&include_variations=false",
    status_code = 400L, headers = structure(list(date = "Wed, 05 Jan 2022 05:17:46 GMT",
        `content-type` = "application/json; charset=utf-8", `x-frame-options` = "DENY",
        `x-xss-protection` = "1; mode=block", `x-content-type-options` = "nosniff",
        `x-download-options` = "noopen", `x-permitted-cross-domain-policies` = "none",
        `referrer-policy` = "strict-origin-when-cross-origin",
        `access-control-allow-origin` = "*", `access-control-allow-methods` = "GET, POST, DELETE, OPTIONS",
        `access-control-allow-headers` = "Accept, Accept-Charset, Accept-Language, Authorization, Cache-Control, Content-Language, Content-Type, DNT, Host, If-Modified-Since, Keep-Alive, Origin, Referer, User-Agent, X-Requested-With",
        `access-control-max-age` = "300", `x-robots-tag` = "none",
        `cache-control` = "public, max-age=7200", `x-action-cache` = "HIT",
        vary = "Accept-Encoding", `content-encoding` = "gzip",
        `strict-transport-security` = "max-age=31536000; includeSubDomains; preload",
        via = "1.1 vegur", `cf-cache-status` = "MISS", `expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
        `report-to` = "{\"endpoints\":[{\"url\":\"https:\\/\\/a.nel.cloudflare.com\\/report\\/v3?s=NRXKGL8s4LNXSpn6q%2Ft%2FwXKnznUeulPOn7WtXrQV%2FcOvqPthnEioTKZjt%2FCWeEsJYuMnTsQuczG88V%2FOLdgKY27P4c2sMtolMeyDNVHBI41mMZYOaON1WjkZo0alWtWK5AOalGMQC7RiAXlUpC0%3D\"}],\"group\":\"cf-nel\",\"max_age\":604800}",
        nel = "{\"success_fraction\":0,\"report_to\":\"cf-nel\",\"max_age\":604800}",
        server = "cloudflare", `cf-ray` = "6c8a3df86bb54edd-GRU",
        `alt-svc` = "h3=\":443\"; ma=86400, h3-29=\":443\"; ma=86400, h3-28=\":443\"; ma=86400, h3-27=\":443\"; ma=86400"), class = c("insensitive",
    "list")), all_headers = list(list(status = 400L, version = "HTTP/2",
    headers = structure(list(date = "Wed, 05 Jan 2022 05:17:46 GMT",
        `content-type` = "application/json; charset=utf-8",
        `x-frame-options` = "DENY", `x-xss-protection` = "1; mode=block",
        `x-content-type-options` = "nosniff", `x-download-options` = "noopen",
        `x-permitted-cross-domain-policies` = "none", `referrer-policy` = "strict-origin-when-cross-origin",
        `access-control-allow-origin` = "*", `access-control-allow-methods` = "GET, POST, DELETE, OPTIONS",
        `access-control-allow-headers` = "Accept, Accept-Charset, Accept-Language, Authorization, Cache-Control, Content-Language, Content-Type, DNT, Host, If-Modified-Since, Keep-Alive, Origin, Referer, User-Agent, X-Requested-With",
        `access-control-max-age` = "300", `x-robots-tag` = "none",
        `cache-control` = "public, max-age=7200", `x-action-cache` = "HIT",
        vary = "Accept-Encoding", `content-encoding` = "gzip",
        `strict-transport-security` = "max-age=31536000; includeSubDomains; preload",
        via = "1.1 vegur", `cf-cache-status` = "MISS", `expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
        `report-to` = "{\"endpoints\":[{\"url\":\"https:\\/\\/a.nel.cloudflare.com\\/report\\/v3?s=NRXKGL8s4LNXSpn6q%2Ft%2FwXKnznUeulPOn7WtXrQV%2FcOvqPthnEioTKZjt%2FCWeEsJYuMnTsQuczG88V%2FOLdgKY27P4c2sMtolMeyDNVHBI41mMZYOaON1WjkZo0alWtWK5AOalGMQC7RiAXlUpC0%3D\"}],\"group\":\"cf-nel\",\"max_age\":604800}",
        nel = "{\"success_fraction\":0,\"report_to\":\"cf-nel\",\"max_age\":604800}",
        server = "cloudflare", `cf-ray` = "6c8a3df86bb54edd-GRU",
        `alt-svc` = "h3=\":443\"; ma=86400, h3-29=\":443\"; ma=86400, h3-28=\":443\"; ma=86400, h3-27=\":443\"; ma=86400"), class = c("insensitive",
    "list")))), cookies = structure(list(domain = ".api.scryfall.com",
    flag = TRUE, path = "/", secure = FALSE, expiration = structure(1641440572, class = c("POSIXct",
    "POSIXt")), name = "heroku-session-affinity", value = "REDACTED"), row.names = c(NA,
-1L), class = "data.frame"), content = charToRaw("{\"object\":\"error\",\"code\":\"bad_request\",\"status\":400,\"warnings\":[\"Invalid expression “mm:123” was ignored. Unknown keyword “mm”.\"],\"details\":\"All of your terms were ignored.\"}"),
    date = structure(1641359866, class = c("POSIXct", "POSIXt"
    ), tzone = "GMT"), times = c(redirect = 0, namelookup = 3.6e-05,
    connect = 3.6e-05, pretransfer = 0.000124, starttransfer = 0.454867,
    total = 0.455062)), class = "response")
tar_resources <- function(
  aws = NULL,
  clustermq = NULL,
  feather = NULL,
  fst = NULL,
  future = NULL,
  parquet = NULL,
  qs = NULL,
  url = NULL
) {
  ## Bundle backend-specific resource settings into a named list.
  ## Each argument must be NULL or the output of the matching
  ## tar_resources_<name>() helper; only non-NULL entries are kept.
  out <- list()
  for (arg in names(formals(tar_resources))) {
    value <- get(arg, inherits = FALSE)
    if (is.null(value)) {
      next
    }
    expected <- paste0("tar_resources_", arg)
    msg <- paste0(
      arg,
      " argument to tar_resources() must be output from tar_resources_",
      arg,
      "() or NULL."
    )
    # Reject objects that were not produced by the matching helper.
    tar_assert_inherits(value, expected, msg)
    out[[arg]] <- value
  }
  out
}
piaac.mean.pv <-
  function(pvlabel, by, data, export=FALSE, name= "output", folder=getwd()) {
    # PIAAC convenience wrapper around intsvy.mean.pv(): expands the
    # plausible-value stem into the ten column names PV<label>1..PV<label>10
    # and supplies the PIAAC survey configuration object.
    pv_names <- paste0("PV", pvlabel, 1:10)
    intsvy.mean.pv(
      pvnames = pv_names,
      by = by,
      data = data,
      export = export,
      name = name,
      folder = folder,
      config = piaac_conf
    )
  }
# Register the S4 generic "erase" (owned by the "datastructures" package)
# so that classes elsewhere can define methods removing `key` — optionally
# a specific `key`/`value` pair — from a container object `obj`.
setGeneric(
  "erase",
  function(obj, key, value) {
    standardGeneric("erase")
  },
  package = "datastructures"
)
#' Plot method for princals objects.
#'
#' Supported plot types: "loadplot" (component loadings with arrows),
#' "biplot" (object scores + loadings), "screeplot" (eigenvalues), and
#' "transplot" (observed vs. transformed scores per variable).
#' Returns nothing useful; called for its plotting side effect.
plot.princals <- function(x, plot.type = "loadplot", plot.dim = c(1, 2), var.subset = "all",
                     col.scores = "black", col.loadings = "black", col.lines = "black", cex.scores = 0.8,
                     cex.loadings = 0.8, stepvec = NA, max.plot.array = c(2, 2), expand = 1,
                     asp = 1, main, xlab, ylab, xlim, ylim, ...)
{
  ## Bug fix: keep the resolved value. The original discarded match.arg()'s
  ## result, so a partial abbreviation such as "scree" passed validation but
  ## matched none of the == branches below and silently drew nothing.
  plot.type <- match.arg(plot.type, c("biplot", "loadplot", "screeplot", "transplot"))
  if ((x$ndim == 1) && (plot.type != "transplot")) stop("No biplot/loadings plot can be drawn for ndim = 1!")
  nvar <- dim(x$loadings)[1]
  if (plot.type == "loadplot") {
    ## Loadings as arrows from the origin; axis limits padded by 20%.
    xycoor <- x$loadings[,plot.dim]
    if (missing(xlim)) {
      xlim.min <- min(xycoor[,1],0)
      xlim.max <- max(xycoor[,1],0)
      xlim <- c(xlim.min, xlim.max)*1.2
    }
    if (missing(ylim)) {
      ylim.min <- min(xycoor[,2],0)
      ylim.max <- max(xycoor[,2],0)
      ylim <- c(ylim.min, ylim.max)*1.2
    }
    if (missing(xlab)) xlab <- paste("Component", plot.dim[1])
    if (missing(ylab)) ylab <- paste("Component", plot.dim[2])
    if (missing(main)) main <- "Loadings Plot"
    plot(xycoor, type = "p", pch = ".", xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, main = main,
         cex = cex.loadings, col = col.loadings, asp = asp, ...)
    abline(h = 0, v = 0, col = "gray", lty = 2)
    if (length(col.loadings) == 1) arcol <- rep(col.loadings, nvar) else arcol <- col.loadings
    for (i in seq_len(nvar)) arrows(0, 0, xycoor[i,1],xycoor[i,2], length = 0.08, col = arcol[i])
    ## Label position 1 (below) or 3 (above) depending on the sign of the
    ## second coordinate.
    posvec <- apply(xycoor, 1, sign)[2,] + 2
    text(xycoor, labels = rownames(xycoor), pos = posvec, cex = cex.loadings, col = col.loadings)
  }
  if (plot.type == "biplot") {
    if (missing(main)) main <- "Biplot"
    if (missing(xlab)) xlab <- paste("Component", plot.dim[1])
    if (missing(ylab)) ylab <- paste("Component", plot.dim[2])
    cols <- c(col.scores, col.loadings)
    cexs <- c(cex.scores, cex.loadings)
    biplot(x$objectscores[,plot.dim], x$loadings[,plot.dim], expand = expand,
           col = cols, cex = cexs, main = main, xlab = xlab, ylab = ylab,
           arrow.len = 0.08, ...)
  }
  if (plot.type == "transplot") {
    ## One panel per selected variable: observed values on x, optimally
    ## transformed scores on y (step function for categorical quantification).
    if (missing(xlab)) xlab <- "Observed"
    if (missing(ylab)) ylab <- "Transformed"
    if (var.subset[1] == "all") var.subset <- rownames(x$loadings)
    if (is.numeric(var.subset)) var.subset <- rownames(x$loadings)[var.subset]
    if (missing(main)) main <- var.subset
    nvars <- length(var.subset)
    plotvars <- as.matrix(x$datanum[,var.subset])
    xlabels <- as.data.frame(x$data[,var.subset])
    ploty <- as.matrix(x$transform[,var.subset])
    knotsv <- x$knots[var.subset]
    ordv <- x$ordinal[var.subset]
    if (missing(max.plot.array)) {
      ## Choose a near-square panel grid large enough for all variables.
      npanv <- ceiling(sqrt(nvars))
      npanh <- floor(sqrt(nvars))
      if (npanv * npanh < nvars) npanv <- npanv + 1
      if (npanv == 1 && npanh == 1) parop <- FALSE else parop <- TRUE
    } else {
      if (length(max.plot.array) < 2){
        npanv <- max.plot.array[1]
        npanh <- max.plot.array[1]
      } else {
        npanv <- max.plot.array[1]
        npanh <- max.plot.array[2]
      }
      npanv <- max(npanv, 1)
      npanh <- max(npanh, 1)
      if (npanv == 1 && npanh == 1) parop <- FALSE else parop <- TRUE
    }
    if (parop) {
      op <- par(mfrow = c(npanv, npanh))
      ## Bug fix: register the restore immediately. The original called
      ## on.exit() only after the loop, so an error while drawing any
      ## panel leaked the mfrow change into the caller's device.
      on.exit(par(op), add = TRUE)
    }
    for (i in seq_len(nvars)) {
      x1 <- plotvars[,i]
      y1 <- ploty[,i]
      xy <- cbind(x1, y1)
      ord <- order(xy[,1])
      if (!is.factor(xlabels[,i])) xlabels[,i] <- round(xlabels[,i], 2)
      ## Draw a step function when the knot count indicates a discrete
      ## quantification, unless the caller overrides via stepvec.
      if (is.na(stepvec[1])) crit <- length(knotsv[[i]]) == (length(unique(plotvars[,i]))-2) else crit <- stepvec[i]
      if (crit) {
        sfun0 <- stepfun(xy[ord,1][-1], xy[ord,2], f = 0)
        if (ordv[i]) vert <- TRUE else vert <- FALSE
        plot(sfun0, xlab = xlab, ylab = ylab, main = main[i], xaxt = "n", col = col.lines, do.points = FALSE, verticals = vert, ...)
        axis(1, labels = xlabels[,i], at = x1)
      } else {
        plot(xy[ord,1], xy[ord,2], type = "l", xlab = xlab, ylab = ylab, main = main[i], xaxt = "n", col = col.lines, ...)
        axis(1, labels = xlabels[,i], at = x1)
      }
    }
  }
  if (plot.type == "screeplot") {
    if (missing(main)) main <- "Scree Plot"
    if (missing(xlab)) xlab <- "Number of Components"
    if (missing(ylab)) ylab <- "Eigenvalues"
    if (missing(ylim)) ylim <- c(0, max(x$evals))
    nd <- length(x$evals)
    plot(1:nd, x$evals, type = "b", xlab = xlab, ylab = ylab, main = main, xaxt = "n", pch = 20,
         ylim = ylim, col = col.lines, ...)
    axis(1, at = 1:nd, labels = 1:nd)
  }
}
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.