Columns: ID (int64, 1 to 1.07k), Comment (string, 8 to 1.13k chars), Code (string, 10 to 4.28k chars), Label (string, 4 classes), Source (string, 21 chars), File (string, 4 to 82 chars).

ID | Comment | Code | Label | Source | File |
---|---|---|---|---|---|
1 |
exclude NAs from data
|
data <- data[complete.cases(data$average), ] summary(data) nrow(data)
|
Data Variable
|
https://osf.io/qhaf8/
|
pupillometry_tutorial_calignano.R
|
2 |
2. compare model indices (ICC, conditional and marginal R²)
|
tab_model(model0.1, model1, model1_full) tab_model(model0.1, model2, model2_full) tab_model(model0.2, model3, model3_full)
|
Statistical Modeling
|
https://osf.io/8edp7/
|
Social Factors COVID-19_Konrad.R
|
3 |
Plot robust model 1: get predicted values for different levels of rsa_socialresources
|
gg_model1 <- ggpredict(model1_robust, c("daycount[0, 25, 50, 75]", "rsa_socialresources[meansd]", "diagnosis"))
|
Visualization
|
https://osf.io/8edp7/
|
Social Factors COVID-19_Konrad.R
|
4 |
count of articles that generated data using experimental techniques (includes articles that use both, percentage calculated using total empirical articles)
|
GenerateData[2,2] <- sum(MMCPSR_emp$EHPdata) GenerateData[2,3] <- sum(MMCPSR_emp$EHPdata)/nrow(MMCPSR_emp)
|
Data Variable
|
https://osf.io/uhma8/
|
AnalysisPost-PAP.R
|
5 |
Data organization: rename columns to simpler variable names
|
colnames(srmadata) srmadata <- srmadata %>% rename(assessors = 2, journal = 3, pubyear = 4, pubmonth = 5, study.title = 6, pmid = 7, regist = 8, regist.num = 9, protocol = 10, title.ident = 11, ab.sources = 12, ab.eleg.crit = 13, ab.particip = 14, ab.interv = 15, ab.effect = 16, ab.included = 17, ab.outcome = 18, in.picos = 19, me.database = 20, me.search.avai = 21, me.grey.lit = 22, me.date.just = 23, me.lang.num = 24, me.picos.desc = 25, me.sele.dup = 26, me.extr.dup = 27, me.rob.desc = 28, me.rob.dup = 29, me.stat.desc = 30, me.heterog = 31, item.removed01 = 32, re.flowdia = 33, re.ssizes = 34, re.picos.desc = 35, re.lengths = 36, re.estim.desc = 37, re.meta.studies = 38, re.rob = 39, re.deviations = 40, di.spin = 41, di.rob.studies = 42, di.limitations = 43, data.statem = 44, fund.statem = 45, funders = 46, coi.statem = 47)
|
Data Variable
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
6 |
Redefine column types to factor
|
srmadata <- data.frame(srmadata) srmadata %>% mutate_if(is.character, as.factor) %>% str()
|
Data Variable
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
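Note that the piped result above is printed by `str()` but never assigned, so the factor conversion is not persisted. A minimal sketch of a persistent version, assuming the same `srmadata` (current dplyr prefers `across()` over the superseded `mutate_if()`):

```r
library(dplyr)

# assign the result back so the conversion is kept
srmadata <- srmadata %>%
  mutate(across(where(is.character), as.factor))
str(srmadata)  # verify the new column types
```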
7 |
Add labels to variables in participant dataframe; then, generate table
|
table1::label(participant$ab.particip) <- "Description of participants (ab)" table1::label(participant$re.picos.desc) <- "Detailed studies' characteristics" table1::table1(~ab.particip + re.picos.desc, data = participant)
|
Data Variable
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
8 |
HISTOGRAM: generate a histogram of the density of scores achieved by the 104 assessed studies. Create a 0/1 dataset, where No = 0 and Yes = 1, with respective study IDs (variable "id")
|
id <- (1:104) allbinary <- data.frame(id, transparency, completenessbinary, participant, intervention, outcome, rigorbinary, appraisalbinary) allbinary[allbinary == "Yes"] <- "1" allbinary[allbinary == "No"] <- "0" lapply(allbinary,as.numeric)
|
Visualization
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
9 |
Set the dataframe as numeric so that we can sum up recommended practices for each study (variable "yes.score")
|
allbinary[] <- lapply(allbinary, function(x) as.numeric(as.character(x)))
|
Data Variable
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
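The `as.numeric(as.character(x))` idiom matters because converting a factor directly to numeric returns its internal level codes rather than the printed values. A small self-contained illustration (not from the source):

```r
f <- factor(c("10", "2", "10"))
as.numeric(f)                # 1 2 1  -- internal level codes
as.numeric(as.character(f))  # 10 2 10 -- the intended numeric values
```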
10 |
Create a new dataframe (binary.scoredf) with a new variable (yes.score) that reflects the number of recommended practices from each study
|
binary.scoredf <- allbinary %>% mutate(yes.score = regist + protocol + me.search.avai + data.statem + title.ident + ab.sources + ab.eleg.crit + ab.included + in.picos + me.picos.desc + re.flowdia + re.ssizes + re.lengths + fund.statem + coi.statem + ab.particip + re.picos.desc + ab.interv + re.picos.desc + ab.outcome + me.stat.desc + me.heterog + re.estim.desc + re.meta.studies + me.grey.lit + me.date.just + me.lang.num + me.sele.dup + me.extr.dup + me.rob.desc + me.rob.dup + re.rob + re.deviations + di.spin + di.rob.studies + di.limitations)
|
Data Variable
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
11 |
Create the histogram
|
histplot.score <- ggplot(binary.scoredf, aes(x=yes.score)) + geom_histogram(binwidth=1, color="black", fill="lightblue") histplot.score + scale_x_continuous(name="Number of recommended practices (max: 36 items)", breaks=seq(0,36,2)) + scale_y_continuous(name="Frequency of publications", limits=c(0, 20)) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))
|
Visualization
|
https://osf.io/ntw7d/
|
SRMA2019_analyses.R
|
12 |
Load data: data file "hfp" contains individual-level foraging return data (kcal) at the daily level along with the date, unique camp id, age, sex, and unique person id.
|
hfp <- read.csv("food_pro_data.csv", as.is=T) h <- hfp[hfp$age > 5,] h$age_z <- (h$age-mean(h$age, na.rm=T))/sd(h$age, na.rm=T) h$month <- month(ymd(h$date))
|
Data Variable
|
https://osf.io/92e6c/
|
hadza_returns_model.R
|
13 |
Plot the marginal/conditional effects for predicted values and probability of zero-days
|
p <- conditional_effects(hadza_lognormal_returns_model, effects=c("age_z:sex")) # includes the hurdle component p2 <- conditional_effects(hadza_lognormal_returns_model, effects=c("age_z:sex"), dpar="hu") # to see the hurdle component (Pr zero-days) plot(p)[[1]] + scale_x_continuous(breaks = c(-1, 0, 1, 2), labels= round(c(-1, 0, 1, 2)*sd(h$age, na.rm=T) + mean(h$age, na.rm=T))) + ggplot2::labs(x="Age", y="kcal/day") +ggplot2::lims(y=c(0,6500)) + theme_classic() plot(p2)[[1]] + ggplot2::lims(y=c(0,1)) + labs(x="Age", y="Probability of zero day\n(hurdle component of model)") + theme_classic() + scale_x_continuous(breaks = c(-1, 0, 1, 2), labels= round(c(-1, 0, 1, 2)*sd(h$age, na.rm=T) + mean(h$age, na.rm=T)))
|
Visualization
|
https://osf.io/92e6c/
|
hadza_returns_model.R
|
14 |
Calculate overall mean standard length (Ls)
|
Ls <- mean(standard.length, na.rm = TRUE)
|
Data Variable
|
https://osf.io/6ukwg/
|
stuart.R
|
15 |
Generate Psi of an AR(1) model
|
Psi = diag(p) diag(Psi) = runif(p,b.ar.min,b.ar.max) return(Psi) }
|
Statistical Modeling
|
https://osf.io/rs6un/
|
Psi.PS.AR.Matrix.R
|
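The Code cell above is a function-body fragment (note the unmatched closing brace). A minimal runnable sketch, assuming it is the body of a generator function; the name `gen.Psi.AR` and the argument list are assumptions, not from the source:

```r
# sketch: build the Psi matrix of an AR(1) model
# (function name and signature assumed; only the body comes from the snippet)
gen.Psi.AR <- function(p, b.ar.min, b.ar.max) {
  Psi <- diag(p)                             # start from a p x p identity matrix
  diag(Psi) <- runif(p, b.ar.min, b.ar.max)  # draw AR(1) coefficients for the diagonal
  return(Psi)
}

Psi <- gen.Psi.AR(p = 3, b.ar.min = 0.1, b.ar.max = 0.5)  # example call
```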
16 |
We use the R package qgraph to visualize our matrix
|
if (!require(qgraph)) install.packages("qgraph"); require(qgraph)
|
Visualization
|
https://osf.io/3kem6/
|
Rcode_Figure2.R
|
17 |
VISUALIZE YOUR NETWORKS Make sure you have the right package: qgraph
|
if (!require(qgraph)) install.packages("qgraph"); require(qgraph)
|
Visualization
|
https://osf.io/3kem6/
|
Rcode_Figure2.R
|
18 |
load source functions and data; generates four data sets: FF, FS, SF, SS
|
source("prep-Exp1-data.r") source("pmwg-DIC.r")
|
Data Variable
|
https://osf.io/wbyj7/
|
Exp1-LBA-null.r
|
19 |
estimate model independently for each condition in the experiment
|
for(condition in names(all.data)) { cat("\n\n\n\nEstimating model for: ", condition, "\n\n") fnam <- paste0("Exp1-LBA-", model.par, "-", condition, ".RData")
|
Statistical Modeling
|
https://osf.io/wbyj7/
|
Exp1-LBA-null.r
|
20 |
Then write a loop to go through all Scopus IDs, adding the output to the first dataframe. NOTE: create an 'output' folder in your working directory before running the following code
|
for (i in 1:length(authors)){ tryCatch({ #using tryCatch() to work around errors (https://stackoverflow.com/questions/14748557/skipping-error-in-for-loop), so author IDs that throw an error are skipped and the loop keeps going res = retrievalByAuthorID(authors[i], apik) M2 = res$M output <- rbind(M,M2) write.csv(output, paste0(output$AU_ID[1],".csv"), row.names=F) #creates separate CSVs for each Scopus ID }, error=function(e){}) }
|
Data Variable
|
https://osf.io/7v4ep/
|
Collaboration boosts career progression_part
|
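Note that `output <- rbind(M, M2)` rebuilds the result from `M` on every pass instead of accumulating across authors. A generic accumulating sketch; `fetch_author()` is a hypothetical stand-in for `retrievalByAuthorID(authors[i], apik)$M`:

```r
# hypothetical accumulating version of the retrieval loop
results <- vector("list", length(authors))
for (i in seq_along(authors)) {
  results[[i]] <- tryCatch(
    fetch_author(authors[i]),   # stand-in; returns one author's data frame
    error = function(e) NULL    # skip author IDs that error, as in the original
  )
}
output <- do.call(rbind, Filter(Negate(is.null), results))
```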
21 |
Select first letter of first name + surname
|
for(i in names(x)){ firstname = word(x[[i]],-1) initial = substring(firstname, 1, 1) surname = word(x[[i]],-2) surname[is.na(surname)] <- "" tot = as.data.frame(paste(initial, surname,sep = ".")) tot[tot=="."]="" x[[i]] <- tot #join first and last name with a full stop }
|
Data Variable
|
https://osf.io/7v4ep/
|
Collaboration boosts career progression_part
|
22 |
Add a column per author, indicating which papers belong to them (0 or 1)
|
authors = as.character(unique(authorswithAPInotworking$author.name)) x[is.na(x)]="" for(i in (authors)){ x$i = rowSums(x == i) colnames(x)[colnames(x) == 'i'] <- i } ncol.new = ncol(x)
|
Data Variable
|
https://osf.io/7v4ep/
|
Collaboration boosts career progression_part
|
23 |
Have a look at the distribution of first and last years, to look for outliers
|
hist(as.numeric(new$first.year)) hist(as.numeric(new$last.year))
|
Visualization
|
https://osf.io/7v4ep/
|
Collaboration boosts career progression_part
|
24 |
Add position of word in sentence (prefabricated list made in Python)
|
positionlist <- read.delim("U:/surfdriveRU/Thesis analyse/LMM analyse/positionlist.txt", header = FALSE) eyetrackingdata$position <- positionlist$V1
|
Data Variable
|
https://osf.io/qynhu/
|
combinealldata.R
|
25 |
density plotting function
|
denschart3 <- function (x, labels = NULL, groups = NULL, gdata = NULL, cex = par("cex"), pt.cex = cex, bg = par("bg"), color = "grey20", colorHPDI ="grey60", HPDI=0.9, vline = NULL, gcolor = par("fg"), lcolor = "gray", xlim = range(unlist(x)), yvals = 1:length(x), yextra=0.7, main = NULL, xlab = NULL, ylab = NULL, height=0.7 , border=NA, adjust=1, ...) { opar <- par("mai", "mar", "cex", "yaxs") on.exit(par(opar)) par(cex = cex, yaxs = "i") if (!is.list(x)) stop("'x' must be a list of vectors or matrices") n <- length(x) glabels <- NULL if (is.list(x)) { if (is.null(labels)) labels <- names(x) if (is.null(labels)) labels <- as.character(1L:n) labels <- rep_len(labels, n)
|
Visualization
|
https://osf.io/a3yd4/
|
Functions_IRT.R
|
26 |
Data wrangling: recode missing-data values (-9, -999) in the whole dataset as NA
|
data <- data %>% mutate_all(~na_if(., -999)) data <- data %>% mutate_all(~na_if(., -9))
|
Data Variable
|
https://osf.io/r4wg2/
|
stadyl_analyses.R
|
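`mutate_all()` is superseded in current dplyr; an equivalent sketch using `across()`, assuming the same `data` and the two missing-data codes above:

```r
library(dplyr)

# recode both missing-data codes to NA in one pass
data <- data %>%
  mutate(across(everything(), ~ na_if(.x, -999)),
         across(everything(), ~ na_if(.x, -9)))
```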
27 |
Moderated regression: center variables
|
data$ls.1c <- data$ls.1 - 3.57 data$sc.c <- scale(data$sc, center = T, scale = F) data$mob.c <- scale(data$mob, center = T, scale = F) data$age.c <- scale(data$age, center = T, scale = F)
|
Statistical Modeling
|
https://osf.io/r4wg2/
|
stadyl_analyses.R
|
28 |
get standardised regression coefficients
|
lm.beta(fit1) lm.beta(fit2)
|
Statistical Modeling
|
https://osf.io/r4wg2/
|
stadyl_analyses.R
|
29 |
Plots: moderated regression
|
psych::describe(data[c("sc", "mob")]) interact_plot(fit1, pred = mob.c, modx =sc.c, #partial.residuals = TRUE, interval = TRUE, int.width = 0.95, modx.values = c(-1.48, 0, 1.48), colors = c("#D9ED92", "#52B69A", "#1E6091"), modx.labels = c("Low (- 1SD)", "Middle (Mean)", "High (+ 1SD)"), legend.main = "Perceived status loss") + labs(y = "Predicted life satisfaction", x = "Upward mobility beliefs") + guides(fill = guide_legend(title = "Perceived status loss")) + coord_cartesian(ylim = c(1,7)) + scale_y_continuous(expand = c(0, 0), breaks = c(1,2,3,4,5,6,7)) + scale_x_continuous(expand = c(0.05, 0.05), breaks = c(-2.94,-1.94, -0.94, 0.06, 1.06, 2.06, 3.06), labels = c(0,1,2,3,4,5,6)) + theme_classic(base_size = 15) + theme( axis.title = element_text(colour = "black", size = 19, margin = margin(t = 0, r = 15, b = 0, l = 0)), axis.text.x = element_text(colour = "black", size = 19, margin = margin(t = 15, r = 0, b = 0, l = 0)), axis.text.y = element_text(colour = "black", size = 19), legend.position = c(0.7,0.85), legend.title = element_text(size = 17), legend.text = element_text(size = 17), legend.key.width = unit(1,"cm") ) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank())
|
Visualization
|
https://osf.io/r4wg2/
|
stadyl_analyses.R
|
30 |
get diffs: get row numbers in order to get the corresponding RecordingTime
|
rownums_parent <- fi_pairs$fiend[latencies[[curr_colname]][[curr_hitname]]$fi_pairs[[1]]] rownums_successor <- fi_pairs$fistart[latencies[[curr_colname]][[curr_hitname]]$fi_pairs[[2]]]
|
Data Variable
|
https://osf.io/mp9td/
|
get_gazeshift_latency.R
|
31 |
Funnel plot: carry out trim-and-fill analysis
|
taf<-trimfill(m.random,main="", ma.fixed = FALSE, fixed = FALSE, random = TRUE, label=F)
|
Visualization
|
https://osf.io/dqjyh/
|
Script.R
|
32 |
draw funnel plot with missing studies filled in
|
funnel(taf, legend=TRUE) summary(trimfill(m.random))
|
Visualization
|
https://osf.io/dqjyh/
|
Script.R
|
33 |
create container variable to store results
|
if(exists("data.full") == 0){ subject <- c(1:50) mean.3SD <- NA sd.3SD <- NA min.3SD <- NA max.3SD <- NA container.3SD <- data.frame(subject, mean.3SD, sd.3SD, min.3SD, max.3SD) } container.3SD$mean.3SD[i] <- mean(data.3SD$rt) container.3SD$sd.3SD[i] <- sd(data.3SD$rt) container.3SD$min.3SD[i] <- container.3SD$mean.3SD[i] - 3*container.3SD$sd.3SD[i] container.3SD$max.3SD[i] <- container.3SD$mean.3SD[i] + 3*container.3SD$sd.3SD[i]
|
Data Variable
|
https://osf.io/5yvnb/
|
analyse_final_Exp5_OSF.R
|
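A hypothetical follow-up (not shown in this row) that applies the stored per-subject bounds to trim reaction times; it assumes the `data.3SD`, `container.3SD`, and loop index `i` from the snippet above:

```r
# keep only RTs inside the subject's mean +/- 3*SD window
keep <- data.3SD$rt >= container.3SD$min.3SD[i] &
        data.3SD$rt <= container.3SD$max.3SD[i]
data.3SD.trimmed <- data.3SD[keep, ]
```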
34 |
setting up colors and line types for plots
|
ns=unique(qms$name) nl=length(ns) disp=data.frame(name=ns,lty=rep(1:9,nl)[1:nl]) disp$col=ifelse(grepl('Hit|Rod',disp$name),'#880101','#aaaaaa') disp$lty[grepl('Hit|Rod',disp$name)]=1:2
|
Visualization
|
https://osf.io/vwq9p/
|
analysis.R
|
35 |
plot the results of the vector fits; color ages >= 85 in red
|
col <- c("gray", "red")[1 + (age >= 85)] main <- "PCoA of Pollen: pollen and independent vectors fitted onto ordination" plot(poln.ord$points, pch = 21, type = "p", xlab = paste0("PCoA I (", round(var.expl[1] * 100, 1), "%)"), ylab = paste0("PCoA II (", round(var.expl[2] * 100, 1), "%)"), main = main, col = "black", bg = col, cex = 1.5, xaxt = "n", yaxt = "n", bty = "n") axis(1, at = seq(-0.5, 1, by = 0.25)) axis(2, at = seq(-1, 0.25, by = 0.25)) legend(0.5, -0.85, pch = c(21, 21, NA), lty = c(NA, NA, 1), lwd = c(NA, NA, 2), col = c("black", "black", "light blue"), pt.bg = c("gray","red", NA), cex = 1.25, legend = c("Post 85k", "Pre 85k", "Lake Level"))
|
Visualization
|
https://osf.io/7h94n/
|
Malawi_ordination.R
|
36 |
create categorical variables
|
share <- to_factor( share, select = c("health_past3months", "wave", "gender", "covid_affected", "partnerinhh", "covid_regime_si3", "covid_regime_ch3") ) share$stringency_index <- share$global_covid_regime_si3
|
Data Variable
|
https://osf.io/cht59/
|
01_tables_1_2.r
|
37 |
Overall mean of Social factors
|
share_all <- data_filter(share, !is.na(health_past3months))
|
Data Variable
|
https://osf.io/cht59/
|
01_tables_1_2.r
|
38 |
Model comparison: apply hierarchical versions of the CC, EWA, LMM, and motivational EWA models
|
setwd("SET TO MODEL FILE DIRECTORY") data <- list("groupSize", "ngroups", "ntrials", "ntokens", "pi", "vals","c","Gc","c_choice_index","Ga") #data inputted into jags params <- c("mu_c") #parameters we'll track in jags samplesCC <- jags(data, inits=NULL, params, model.file ="CC_group.txt", n.chains=3, n.iter=5000, n.burnin=1000, n.thin=1) params <- c("mu_c") #parameters we'll track in jags samplesEWA <- jags(data, inits=NULL, params, model.file ="EWA_group.txt", n.chains=3, n.iter=5000, n.burnin=1000, n.thin=1) params <- c("nu_c") #parameters we'll track in jags samplesLMM <- jags(data, inits=NULL, params, model.file ="LMM_group.txt", n.chains=3, n.iter=5000, n.burnin=1000, n.thin=1)
|
Statistical Modeling
|
https://osf.io/meh5w/
|
modelComparison.R
|
39 |
Number of subjects in each group: create a group variable according to each cluster condition
|
if (size == 1){ N.size = N/K n.group = unlist(lapply(1:K, function(k) rep(k,N.size))) } if (size == 2){ if (K == 2){ N.1 = 0.10*N N.2 = N - N.1 n.group = c(rep(1,N.1),rep(2,N.2)) } if (K == 4){ N.1 = 0.10*N N.rest = N - N.1 N.size = N.rest/(K-1) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size),rep(4,N.size)) }} if (size == 3){ if (K == 2){ N.1 = 0.6*N N.2 = N - N.1 n.group = c(rep(1,N.1),rep(2,N.2)) } if (K == 4){ if (N == 20){ N.1 = 0.6*N N.rest = N - N.1 N.size = floor(N.rest/(K-1)) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size+1),rep(4,N.size+1)) } else{ N.1 = 0.6*N N.rest = N - N.1 N.size = N.rest/(K-1) n.group = c(rep(1,N.1),rep(2,N.size),rep(3,N.size),rep(4,N.size)) }}}
|
Data Variable
|
https://osf.io/rs6un/
|
Data.Cluster.VAR.R
|
40 |
Create variable subjno; scramble data so people appear unordered in the final dataset
|
ID = expand.grid(1:T,1:N)[,2] data = cbind(data,ID) p = ncol(Psi[[1]]) Y = matrix(0,nrow(data),p) colnames(Y) = sprintf("Y%d",seq(1:p))
|
Data Variable
|
https://osf.io/rs6un/
|
Data.Cluster.VAR.R
|
41 |
Frequency plot
|
top.freqs = sort(colSums(data.table), decreasing = T)[1:40] cex.val = 1.6 cairo_pdf('pdfs/top_freq_segments.pdf', width = 16, height = 10) par(family = "Doulos SIL") plot(top.freqs ~ seq_along(top.freqs), xlab = 'Frequency rank', ylab = 'Frequency', xlim = c(1, 40), type = 'n', xaxt = 'n', cex.lab = cex.val, cex.axis = cex.val) axis(1, at = seq_along(top.freqs), labels = seq_along(top.freqs), cex.axis = 1.1) lines(seq_along(top.freqs), top.freqs, lty = 2, col = 'grey') text(seq_along(top.freqs), top.freqs, labels = names(top.freqs), cex = 2) dev.off()
|
Visualization
|
https://osf.io/2qjn5/
|
redraw_figures.R
|
42 |
Contrast between countries as a new variable
|
d$CountC<-as.numeric(as.integer(as.factor(d$Country))-1.5) table(d$Country,d$CountC)
|
Data Variable
|
https://osf.io/fr5ed/
|
01_data_prepare.R
|
43 |
Estimated Marginal Means Model 3
|
m1 <- ggemmeans(model3, c("welle [1:4 by=.2]", "isced")) m2 <- ggemmeans(model3, c("welle [1:4 by=.2]", "aee_oecd_between_z2")) m3 <- ggemmeans(model3, c("welle [1:4 by=.2]", "lone6_between_z2")) m1$Model = "ISCED" levels(m1$group) <- c("low", "middle", "high") m2$Model = "Income" levels(m2$group) <- c("-1 SD", "Mean", "+1 SD") m3$Model = "Loneliness" levels(m3$group) <- c("-1 SD", "Mean", "+1 SD") create_plot(m1, title = "(a) PF and Education") create_plot(m2, title = "(b) PF and Income") create_plot(m3, title = "(c) PF and Loneliness")
|
Statistical Modeling
|
https://osf.io/dcw4x/
|
04_figures_estimates_marginal_means.R
|
44 |
Matrix with correlations and p-values
|
Cormatrix1 <- data.frame(matrix(NA,nrow = 15, ncol = 15)) for(i in 1:14){ for(j in (i+1):15){ Cormatrix1[i,j] <- round(cor(Cordata[,i], Cordata[,j]), digits=2) Cormatrix1[j,i] <- round(cor.test(Cordata[,i], Cordata[,j])$p.value, digits=2) } } Cormatrix1
|
Data Variable
|
https://osf.io/t93pf/
|
Simulation-Random-Significance.r
|
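The nested loop stores correlations above the diagonal and p-values below it. For comparison, a sketch of the same quantities via `Hmisc::rcorr()`, assuming `Cordata` is an all-numeric data frame:

```r
library(Hmisc)

rc <- rcorr(as.matrix(Cordata))  # Pearson correlations by default
round(rc$r, 2)                   # correlation matrix
round(rc$P, 2)                   # p-value matrix (NA on the diagonal)
```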
45 |
Matrix with correlations and significance stars
|
Cormatrix2 <- data.frame(matrix(NA,nrow = 15, ncol = 15)) for(i in 1:14){ for(j in (i+1):15){ Cormatrix2[i,j] <- round(cor(Cordata[,i], Cordata[,j]), digits=2) Cormatrix2[j,i] <- ifelse(cor.test(Cordata[,i], Cordata[,j])$p.value<=0.05, "*", "no") } } Cormatrix2
|
Visualization
|
https://osf.io/t93pf/
|
Simulation-Random-Significance.r
|
46 |
Generate Psi matrix for each cluster
|
for (n in 1:length(N)){ for (t in 1:length(T)){ for (p in 1:length(P)){ for (k in 1:length(K)){ for (d in 1:length(diff)){ for (r in 1:R){ Psi.list = lapply(1:K[k], function(kl) Psi.Matrix.Diff(P[p],b.ar.min,b.ar.max,b.cr.min,b.cr.max,d))
|
Data Variable
|
https://osf.io/rs6un/
|
Code_Simulation_Part_III_MVAR_RE.R
|
47 |
Load training and testing set
|
load(file = paste("Data_Block_Cluster_MVAR_RE_N_",N[n],"_T_",T[t],"_P_",P[p],"_K_",K[k], "_Diff_",diff[d],"_size_",size[s],"_R_",r,".RData",sep = "")) MSE.MVAR.PS.Sys = MSE.Sys(data.list,P[p]) save(MSE.MVAR.PS.Sys, file = paste("MSE_MVAR_Cluster_MVAR_RE_N_",N[n],"_T_",T[t],"_P_",P[p],"_K_",K[k], "_Diff_",diff[d],"_size_",size[s],"_R_",r,".RData",sep = ""))
|
Data Variable
|
https://osf.io/rs6un/
|
Code_Simulation_Part_III_MVAR_RE.R
|
48 |
standardize the model inputs, excluding the response and random effects
|
d_std <- stand(d, cols = f2) # use the fitting function for convenience
|
Statistical Modeling
|
https://osf.io/3gfqn/
|
VADIS_particles_InnerC-only.R
|
49 |
> Left panel: QQ-plot (uniform distribution) > Right panel: residuals against predicted values; shaded (due to sample size), with extreme residuals colored red. 3) MAIN EFFECTS OF KEY VARIABLES: FormatInfo
|
emmip(CorResult, ~FormatInfo, type = "response", CIs = TRUE) (emm <- emmeans(CorResult, specs = ~FormatInfo, type = "response")) pairs(emm) EffPlotData_CorrFormat <- summary(emm) ## Data for Fig. 3
|
Visualization
|
https://osf.io/2sz48/
|
Model_Correctness.R
|
50 |
Rescale order of variables on y-axis (for BRT figures)
|
BRT.plot.label.limits <- c("site.centrality", "mean.annual.flow", "basin.area", "site.long", "site.lat", "pct.ISC", "pct.urb", "pct.ag", "pct.for", "ALG.cover", "NAT.cover", "LWD.reach", "DOC", "cond", "pH.lab", "total.P", "NH4", "NO3")
|
Visualization
|
https://osf.io/62je8/
|
DMS-NRSA-CA-QC-Figures.R
|
51 |
Rename variable names on y-axis (for BRT figures)
|
BRT.plot.labels <- c("site.centrality" = "Cent", "mean.annual.flow" = "Flow", "basin.area" = "Area", "site.long" = "Long", "site.lat" = "Lat", "pct.ISC" = "ISC", "pct.urb" = "Urb", "pct.ag" = "Ag", "pct.for" = "For", "ALG.cover" = "Alg", "NAT.cover" = "Nat", "LWD.reach" = "LWD", "DOC" = "DOC", "cond" = "Cond", "pH.lab" = "pH", "total.P" = "TP", "NH4" = expression(NH[4]), "NO3" = expression(NO[3]))
|
Visualization
|
https://osf.io/62je8/
|
DMS-NRSA-CA-QC-Figures.R
|
52 |
Sample posterior and prior for graphical comparison
|
post1<-extract.samples(m1) set.seed(42) prio1<-extract.prior(m1,n=10000) save.image(file="posterior_samples_single.RData")
|
Visualization
|
https://osf.io/fr5ed/
|
02_analysis_single_estimate.R
|
53 |
correlation matrix for the DVs
|
judcorMat2 <- lowerCor(judgmentRatings2) corr.test(judgmentRatings2) corrplot(judcorMat2, method="color", type = 'lower',tl.col="black", addCoef.col = "black", tl.srt = 45)
|
Statistical Modeling
|
https://osf.io/dhmjx/
|
Experiment4a-Analyses.R
|
54 |
Subset data that includes only partner cooperation means within 0.4 to 0.6
|
bound_data <- subset(expt1_data, expt1_data$cooplc_means >= 0.4 & expt1_data$cooplc_means <= 0.6 & expt1_data$readlc_means >= 0.4 & expt1_data$readlc_means <= 0.6)
|
Data Variable
|
https://osf.io/zcv4m/
|
winke_stevens_2017_rcode.R
|
55 |
Plot histogram of chances that partner's choice was positive for all data
|
coop_hist_ggplot <- ggplot(all_pc, aes(x = alllc_means * 100)) + geom_histogram(aes(fill = included), bins = 50) + # plot histogram scale_fill_manual(values = c("black", "grey50"), name="", label=c("Included", "Not included")) + # color values inside and outside of 0.4-0.6 differently labs(x = "Percent partner positive actions", y = "Number of participants") + # label axes theme_classic() + # use classic theme theme(axis.title=element_text(size=45), axis.text=element_text(size=30), legend.text=element_text(size=30), legend.position = c(0.25, 0.9), legend.key.size = unit(2.5, 'lines')) png(file = "figures/partner_action_histogram.png", width = 1200, height = 750) # open device plot(coop_hist_ggplot) # plot figure dev.off() # close device
|
Visualization
|
https://osf.io/zcv4m/
|
winke_stevens_2017_rcode.R
|
56 |
Analyze accuracy as a function of payoff scheme (Standard or Costly), context (Cooperation or Newspaper), and partner action (Cooperate or Defect). Conduct binomial GLMM of payoff scheme * partner action + context for memory accuracy
|
accuracy_glmer_full <- glmer(accuracy ~ payoff_scheme * partner_action * context + (1 | subject), bound_data, family = binomial(link = "logit")) # calculate GLMM of full model
|
Statistical Modeling
|
https://osf.io/zcv4m/
|
winke_stevens_2017_rcode.R
|
57 |
Conduct binomial GLMMs for memory accuracy to calculate BIC values to transform to Bayes factors
|
accuracy_glmer_null <- summary(glmer(accuracy ~ (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for null model accuracy_glmer_payoff <- summary(glmer(accuracy ~ payoff_scheme + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for payoff scheme accuracy_glmer_context <- summary(glmer(accuracy ~ context + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for context accuracy_glmer_action <- summary(glmer(accuracy ~ partner_action + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for partner action accuracy_glmer_payoff_action <- summary(glmer(accuracy ~ payoff_scheme + partner_action + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for payoff_scheme + partner_action accuracy_glmer_payoff_action_inter <- summary(glmer(accuracy ~ payoff_scheme * partner_action + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for payoff_scheme * partner_action accuracy_glmer_action_context <- summary(glmer(accuracy ~ partner_action + context + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for partner_action + context accuracy_glmer_action_context_inter <- summary(glmer(accuracy ~ partner_action * context + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for partner_action * context accuracy_glmer_payoff_action_context <- summary(glmer(accuracy ~ payoff_scheme + partner_action + context + (1 | subject), bound_data, family = binomial(link = "logit"))) # calculate GLMM for payoff_scheme * partner_action * context last_accuracy_glmer_null <- summary(glmer(accuracy ~ (1 | subject), last_data, family = binomial(link = "logit"))) # calculate GLMM for null model last_accuracy_glmer_payoff <- summary(glmer(accuracy ~ payoff_scheme + (1 | subject), last_data, family = binomial(link = "logit"))) # calculate GLMM for payoff scheme last_accuracy_glmer_action <- summary(glmer(accuracy ~ partner_action + (1 | subject), last_data, family = binomial(link = "logit"))) # calculate GLMM for partner action last_accuracy_glmer_payoff_action <- summary(glmer(accuracy ~ payoff_scheme + partner_action + (1 | subject), last_data, family = binomial(link = "logit"))) # calculate GLMM for payoff_scheme + partner_action last_accuracy_glmer_payoff_action_inter <- summary(glmer(accuracy ~ payoff_scheme * partner_action + (1 | subject), last_data, family = binomial(link = "logit"))) # calculate GLMM for payoff_scheme * partner_action
|
Statistical Modeling
|
https://osf.io/zcv4m/
|
winke_stevens_2017_rcode.R
|
58 |
Conduct binomial GLMM of payoff scheme * partner action + context for memory accuracy
|
last_accuracy_glmer_full <- glmer(accuracy ~ payoff_scheme * partner_action + (1 | subject), last_data, family = binomial(link = "logit")) # calculate GLMM of full model
|
Statistical Modeling
|
https://osf.io/zcv4m/
|
winke_stevens_2017_rcode.R
|
59 |
Calculate correlation between mean number of contacts and mean memory accuracy
|
coop_contacts_cor2 <- cor.test(expt2_data_subj$accuracy, expt2_data_subj$contacts) # calculate network size/accuracy correlation coop_contacts2_bfdf <- data.frame(accuracy = expt2_data_subj$accuracy, contacts = expt2_data_subj$contacts) # create new data frame for Bayesian analysis coop_contacts2_lmbf <- lmBF(accuracy ~ contacts, data = coop_contacts2_bfdf) # calculate Bayes regression coop_contacts2_bf <- extractBF(coop_contacts2_lmbf)$bf # extract Bayes factor
|
Statistical Test
|
https://osf.io/zcv4m/
|
winke_stevens_2017_rcode.R
|
60 |
Create dummy variable for ethnicity: 0 = option 4 only (Anglo/White), 1 = any other option / combination of options
|
table(survey$ethnic_group[!duplicated(survey$id)], useNA = "ifany") survey$ethnicity <- ifelse(survey$ethnic_group == "4", 0, 1) table(survey$ethnicity[!duplicated(survey$id)], useNA = "ifany")
|
Data Variable
|
https://osf.io/jpxts/
|
Data_Prep_S1S2.R
|
61 |
Create dummy variable for SES: 1 = mother or father completed at least some college (4 = some college), 0 = otherwise
|
table(survey$mother_educationlevel[!duplicated(survey$id)], useNA = "ifany") table(survey$father_educationlevel[!duplicated(survey$id)], useNA = "ifany") survey$SES <- ifelse(survey$mother_educationlevel >= 4 | survey$father_educationlevel >= 4, 1, 0) table(survey$SES[!duplicated(survey$id)], useNA = "ifany")
|
Data Variable
|
https://osf.io/jpxts/
|
Data_Prep_S1S2.R
|
62 |
Weekend: create dummy variable for weekend (1 = weekend, 0 = weekday)
|
survey$weekend <- ifelse(weekdays(survey$StartDate, abbr = TRUE) %in% c("Sat", "Sun"), 1, 0)
|
Data Variable
|
https://osf.io/jpxts/
|
Data_Prep_S1S2.R
|
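`weekdays(..., abbr = TRUE)` returns locale-dependent names, so matching on "Sat"/"Sun" only works in English locales. A locale-independent sketch using `POSIXlt`'s numeric day of week, assuming the same `survey$StartDate`:

```r
# POSIXlt $wday codes days 0 = Sunday through 6 = Saturday in any locale
dow <- as.POSIXlt(survey$StartDate)$wday
survey$weekend <- ifelse(dow %in% c(0, 6), 1, 0)
```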
63 |
Create dummy variable for mixed interaction partners
|
survey$no_partners <- apply(survey[c("close_peers", "family", "weak_ties")], 1, sum) survey$mixed_partner <- ifelse(survey$no_partners > 1, 1, 0) table(survey$mixed_partner, useNA = "ifany") # number of observations with mixed interaction partners: 11479 (S1) / 5797 (S2) table(apply(survey[c("close_peers", "family", "weak_ties")], 1, sum), useNA = "ifany") survey$close_peers_all <- survey$close_peers survey$family_all <- survey$family survey$weak_ties_all <- survey$weak_ties survey$close_peers <- ifelse(survey$mixed_partner == 1, NA, survey$close_peers) survey$family <- ifelse(survey$mixed_partner == 1, NA, survey$family) survey$weak_ties <- ifelse(survey$mixed_partner == 1, NA, survey$weak_ties) table(survey$close_peers, useNA = "ifany") # number of observations with interactions with close peers ONLY: 17194 (S1) / 9406 (S2) table(survey$interacting_people[survey$close_peers == 1]) table(survey$family, useNA = "ifany") # number of observations with interactions with family ONLY: 2450 (S1) / 1946 (S2) table(survey$interacting_people[survey$family == 1]) table(survey$weak_ties, useNA = "ifany") # number of observations with interactions with weak ties ONLY: 4336 (S1) / 2823 (S2) table(survey$interacting_people[survey$weak_ties == 1]) table(apply(survey[c("close_peers", "family", "weak_ties")], 1, sum), useNA = "ifany") # 23980 (S1) / 14175 (S2) length(which(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people) | survey$mixed_partner == 1)) table(apply(survey[c("close_peers_all", "family_all", "weak_ties_all")], 1, sum), useNA = "ifany") # 23980, 10176, 1303 (S1) / 14175, 5116, 681 (S2) length(which(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people))) survey$no_partners2 <- apply(survey[c("friends_roommates", "significant_other", "family_all", "weak_ties_all")], 1, sum) survey$mixed_partner2 <- ifelse(survey$no_partners2 > 1, 1, 0) table(survey$mixed_partner2, useNA = "ifany") # number of observations with mixed interaction partners: 13456 (S1) / 6847 (S2) table(apply(survey[c("friends_roommates", "significant_other", "family_all", "weak_ties_all")], 1, sum), useNA = "ifany") survey$friends_roommates2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$friends_roommates) survey$significant_other2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$significant_other) survey$family2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$family_all) survey$weak_ties2 <- ifelse(survey$mixed_partner2 == 1, NA, survey$weak_ties_all) table(survey$friends_roommates2, useNA = "ifany") # number of observations with interactions with friends and roommates ONLY: 12882 (S1) / 6865 (S2) table(survey$interacting_people[survey$friends_roommates2 == 1]) table(survey$significant_other2, useNA = "ifany") # number of observations with interactions with significant others ONLY: 2335 (S1) / 1491 (S2) table(survey$interacting_people[survey$significant_other2 == 1]) table(survey$family2, useNA = "ifany") # number of observations with interactions with family ONLY: 2450 (S1) / 1946 (S2) table(survey$interacting_people[survey$family2 == 1]) table(survey$weak_ties2, useNA = "ifany") # number of observations with interactions with weak ties ONLY: 4336 (S1) / 2823 (S2) table(survey$interacting_people[survey$weak_ties2 == 1]) table(apply(survey[c("friends_roommates2", "significant_other2", "family2", "weak_ties2")], 1, sum), useNA = "ifany") # 22003 (S1) / 13125 (S2) length(which(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 
1 | is.na(survey$interacting_people) | survey$mixed_partner2 == 1))
|
Data Variable
|
https://osf.io/jpxts/
|
Data_Prep_S1S2.R
|
64 |
Create dummy variable for significant others
|
survey$significant_other <- ifelse(survey$mixed_mode == 1 | survey$SKIP2 == 1 | survey$OTHER2 == 1 | is.na(survey$interacting_people), NA, ifelse(survey$Significant_other == 1, 1, 0)) table(survey$significant_other, useNA = "ifany") # number of observations with interactions with significant others: 6681 (S1) / 3822 (S2) table(survey$interacting_people[survey$significant_other == 1])
|
Data Variable
|
https://osf.io/jpxts/
|
Data_Prep_S1S2.R
|
65 |
converting PPID and Statement to factors
|
Data$PPID <- as.factor(Data$PPID) Data$Statement <-as.factor(Data$Statement)
|
Data Variable
|
https://osf.io/dh32q/
|
PositivityratingsRscript.R
|
66 |
find Q1, Q3, and the interquartile range for values in DV2
|
Q1 <- quantile(DV2, .25) Q3 <- quantile(DV2, .75) IQR2 <- IQR(DV2)
|
Data Variable
|
https://osf.io/dzwct/
|
Fisher_Z_3PERIODS_std.R
|
67 |
only keep rows in dataframe that have values within 1.5*IQR of Q1 and Q3
|
no_outliers2 <- subset(df, DV2> (Q1 - 1.5*IQR2) & DV2< (Q3 + 1.5*IQR2)) no_outliers3 <- subset(df, DV3> (Q1 - 1.5*IQR3) & DV3< (Q3 + 1.5*IQR3))
|
Data Variable
|
https://osf.io/dzwct/
|
Fisher_Z_3PERIODS_std.R
|
68 |
Plot beta weights for interaction
|
betalms # called "beta" but represents "b" betalms <- betalms[,1:2] names(betalms) <- c("b", "SE")
|
Visualization
|
https://osf.io/k853j/
|
ESS_openess_2018_perCountry.R
|
69 |
Plot beta weights for engagement
|
betalms_eng betalms_eng <- betalms_eng[,1:2] names(betalms_eng) <- c("b", "SE")
|
Visualization
|
https://osf.io/k853j/
|
ESS_openess_2018_perCountry.R
|
70 |
Arrange b coeff plots
|
grid.arrange(betalms_ope_p, betalms_eng_p, betalms_p, ncol = 3, nrow = 1)
|
Visualization
|
https://osf.io/k853j/
|
ESS_openess_2018_perCountry.R
|
71 |
Plot slopes
|
listofcharts = list() # create empty list for charts index = 0 # zero the index for (df in listofdfs) { index = index + 1 xlab_str = paste0("Economic beliefs in ", names(listofdfs)[index]) listofcharts[[index]] <- ggpredict(listoflms[[index]], terms = c("conservation2_s_c", "polit_eng_c[-0.14,0.14]"), type = "fe") %>% plot(colors = "bw") + ggtitle(names(listoflms[index])) + xlab("NSC") + ylab("Economic beliefs") + labs(linetype = "Political \nengagement") + scale_linetype_manual(values=c("solid", "dashed"), labels = c("Low", "High")) + theme_classic() + theme(legend.position = "none") +
|
Visualization
|
https://osf.io/k853j/
|
ESS_openess_2018_perCountry.R
|
72 |
Does the start model fit the data significantly better as compared to a model without random intercepts over items?
|
tic(); start_min_item_intercepts <- glmer(bin_score ~ input*testmoment*learningtype + (1|participant), family = 'binomial', data = data, control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=1e5))); toc() anova(start_min_item_intercepts, start) # Start model is significantly better (p < .001); AIC difference of ~240
|
Statistical Modeling
|
https://osf.io/938ye/
|
Statistical_models_no_T3_criticals.R
|
73 |
Random slope of input over item
|
tic(); input_item <- glmer(bin_score ~ input*testmoment*learningtype + (1+input|item) + (1|participant), family = 'binomial', data = data, control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=1e5))); toc() anova(start, input_item) # Significant improvement (p = .03); small AIC difference (3). summary(rePCA(input_item)) # All dimensions are supported by the data. >> KEEP
|
Data Variable
|
https://osf.io/938ye/
|
Statistical_models_no_T3_criticals.R
|
74 |
Investigate model fit: inspect residuals with a binned residual plot
|
residualsplot <- binnedplot(fitted(final), resid(final, type = "response"), cex.pts=1, col.int="black", xlab = "Estimated score (as probability)")
|
Visualization
|
https://osf.io/938ye/
|
Statistical_models_no_T3_criticals.R
|
75 |
model 1 is just baseline PHQ-9 severity in a simple logistic regression
|
HI_prognostic_model1 = glm(y ~ PHQ9_first, data = X_HI_only, family = "binomial") summary(HI_prognostic_model1)
|
Statistical Modeling
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
76 |
model 3 is a logistic regression built using elastic-net regularization; use elastic net to build this model
|
set.seed(12345678) fit_HI = glmnet(data.matrix(X_HI_only_forEN), y_HI_only, family="binomial", alpha=.5) plot(fit_HI,label=TRUE) HI_prognostic_model3 = cv.glmnet(data.matrix(X_HI_only_forEN), y_HI_only, family="binomial", alpha=.5) plot(HI_prognostic_model3) HI_prognostic_model3$lambda.min HI_prognostic_model3$lambda.1se print(coef(HI_prognostic_model3, s = "lambda.min")) print(coef(HI_prognostic_model3, s = "lambda.1se"))
|
Statistical Modeling
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
77 |
calculate deviance statistic from the log-likelihood: sum from i=1 to N of [ Y_i*ln(P(Y_i)) + (1 - Y_i)*ln(1 - P(Y_i)) ]; calculate Brier score: brier_score = (1/n)*sum((p_i - o_i)^2)
|
log_likelihood_calculator_HI_1 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_1 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_2 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_2 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_3 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_3 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_4 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_4 = rep(NA,dim(X_HI_only_hold_out)[1]) log_likelihood_calculator_HI_5 = rep(NA,dim(X_HI_only_hold_out)[1]) brier_score_calculator_HI_5 = rep(NA,dim(X_HI_only_hold_out)[1]) for (i in 1:dim(X_HI_only_hold_out)[1]){ log_likelihood_calculator_HI_1[i] = y_HI_only_hold_out[i]*log(HI_prognosis_1[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_1[i]) brier_score_calculator_HI_1[i] = (HI_prognosis_1[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_2[i] = y_HI_only_hold_out[i]*log(HI_prognosis_2[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_2[i]) brier_score_calculator_HI_2[i] = (HI_prognosis_2[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_3[i] = y_HI_only_hold_out[i]*log(HI_prognosis_3[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_3[i]) brier_score_calculator_HI_3[i] = (HI_prognosis_3[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_4[i] = y_HI_only_hold_out[i]*log(HI_prognosis_4[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_4[i]) brier_score_calculator_HI_4[i] = (HI_prognosis_4[i]-y_HI_only_hold_out[i])^2 log_likelihood_calculator_HI_5[i] = y_HI_only_hold_out[i]*log(HI_prognosis_5[i])+(1-y_HI_only_hold_out[i])*log(1-HI_prognosis_5[i]) brier_score_calculator_HI_5[i] = (HI_prognosis_5[i]-y_HI_only_hold_out[i])^2 } deviance_for_HI_model_1 = sum(log_likelihood_calculator_HI_1,na.rm=TRUE) brier_score_for_HI_model_1 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_1,na.rm=TRUE) deviance_for_HI_model_2 = sum(log_likelihood_calculator_HI_2,na.rm=TRUE) brier_score_for_HI_model_2 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_2,na.rm=TRUE) deviance_for_HI_model_3 = sum(log_likelihood_calculator_HI_3,na.rm=TRUE) brier_score_for_HI_model_3 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_3,na.rm=TRUE) deviance_for_HI_model_4 = sum(log_likelihood_calculator_HI_4,na.rm=TRUE) brier_score_for_HI_model_4 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_4,na.rm=TRUE) deviance_for_HI_model_5 = sum(log_likelihood_calculator_HI_5,na.rm=TRUE) brier_score_for_HI_model_5 = (1/dim(X_HI_only_hold_out)[1])*sum(brier_score_calculator_HI_5,na.rm=TRUE) print(round(deviance_for_HI_model_1,1)) print(round(brier_score_for_HI_model_1,3)) print(round(deviance_for_HI_model_2,1)) print(round(brier_score_for_HI_model_2,3)) print(round(deviance_for_HI_model_3,1)) print(round(brier_score_for_HI_model_3,3)) print(round(deviance_for_HI_model_4,1)) print(round(brier_score_for_HI_model_4,3)) print(round(deviance_for_HI_model_5,1)) print(round(brier_score_for_HI_model_5,3))
|
Statistical Test
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
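The per-observation loop can be collapsed with vectorized arithmetic. A sketch for model 1 only, assuming the same objects as above:

```r
y <- y_HI_only_hold_out
p <- HI_prognosis_1

# log-likelihood: sum of y*ln(p) + (1 - y)*ln(1 - p)
deviance_HI_1 <- sum(y * log(p) + (1 - y) * log(1 - p), na.rm = TRUE)
# Brier score: mean squared distance between prediction and outcome
brier_HI_1 <- mean((p - y)^2, na.rm = TRUE)
```

One caveat: `mean(..., na.rm = TRUE)` divides by the number of non-missing cases, while the original divides by the full hold-out size, so the two differ when predictions are missing.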
78 |
here we create a PAI-style model that includes variables and their interaction with tx; we use this model as a demonstration:
|
differential_model_6 = glm(y ~ tx*(PHQ9_first+WSAS_first+Employment_binary+Ethnicity_binary+GAD7_first+Phobia_Q3_first), data = X_training, family = "binomial") summary(differential_model_6)
|
Statistical Modeling
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
79 |
grab the indices of which individuals got tx = 0 (LI) and tx = 1 (HI)
|
tx_HI_i = which(X_hold_out$tx==1, arr.ind = TRUE) tx_LI_i = which(X_hold_out$tx==0, arr.ind = TRUE) step_size = 150 window_size = 300 bin_number = ceiling((dim(X_hold_out)[1]-window_size)/step_size)
|
Data Variable
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
80 |
the commands below can be used to see what y-limits and x-limits should be used to standardize all plots
|
cat("y_limits",round(c(min(observed_differential_response),max(observed_differential_response)),3),"\n") cat("x_limits",round(c(min(predicted_differential_response),max(predicted_differential_response)),3),"\n") cat("predicted avg differential response (full sample avg):",round(mean(differential_prediction),3),"\n") cat("predicted avg differential response (binned avg):",round(mean(predicted_differential_response),3),"\n") predicted_avg_diff = mean(differential_prediction) plot(predicted_differential_response,observed_differential_response,'p',ylim=c(-.1,.3), xlim = c(-.1,.3),xlab=x_label) plot(predicted_differential_response,observed_differential_response,'p',ylim=c(-.1,.3), xlim = c(min(predicted_differential_response),max(predicted_differential_response)),xlab=x_label) cor_windows = cor(1:bin_number,observed_differential_response) cor_pred_diff = cor(predicted_differential_response,observed_differential_response) cat("windows correlation = ",round(cor_windows,3),"\n") cat("predicted difference correlation = ",round(cor_pred_diff,3),"\n")
|
Visualization
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
81 |
we can use the t-statistic to adjust for the error around the slope; the t-value is the coefficient divided by its standard error
|
cat("slope t-stat = ",round(sc_coefs_summary$t.value[2],3),"\n") print(round(sc_coefs_summary,3)) cat("\n") observed_range = max(observed_differential_response) - min(observed_differential_response) predicted_range = max(predicted_differential_response) - min(predicted_differential_response) tstat = sc_coefs_summary$t.value[2] slope = sc_coefs_summary$Estimate[2] model_evaluations[k,] = c(tstat, slope, observed_range, predicted_range, predicted_avg_diff, cor_pred_diff) model_predictions[k,] = predicted_differential_response model_results[k,] = observed_differential_response } print(round(model_evaluations,3))
|
Statistical Test
|
https://osf.io/wxgzu/
|
outcome_evaluation_code_v5.R
|
82 |
comparisons to average for each trait
|
summary(lmer(RATINGc ~ TSELFc* EX + SMEANc* EX + SDMEAN* EX +PSELFc* EX + (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* OP + SMEANc* OP + SDMEAN* OP +PSELFc*OP+ (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* AG + SMEANc* AG + SDMEAN* AG +PSELFc*AG + (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* NE + SMEANc* NE + SDMEAN* NE +PSELFc*NE+ (TSELFc + SMEANc + SDMEANc +PSELFc | PID),data= subset(fmimlm, fmimlm$motive == 0 ) )) summary(lmer(RATINGc ~ TSELFc* CO + SMEANc* CO + SDMEAN* CO+PSELFc*CO+ (TSELFc + SMEANc + SDMEANc +PSELFc | PID), data= subset(fmimlm,fmimlm$motive == 0 ) ))
|
Data Variable
|
https://osf.io/ns4h9/
|
RScript_motives.R
|
83 |
Conduct RSA. We want to estimate the RSA model in which the intercept is allowed to vary across states. The equation for individual i living in state j (z = self-esteem, x = IV, y = SV) is: z_ij = b0 + b1*x_ij + b2*y_j + b3*x_ij^2 + b4*x_ij*y_j + b5*y_j^2 + u_j + e_ij. Specify and estimate this model:
|
m.c <- lmer(selfesteem ~ IV.c + SV.c + IV2.c + IVSV.c + SV2.c + (1 | state), data = df) summary(m.c)
|
Statistical Modeling
|
https://osf.io/jhyu9/
|
example_Rcode_mlrsa_osf_oneL1pred.R
|
84 |
Plot the average surface using MLRSA_AverageSurfacePlot:
|
MLRSA_AverageSurfacePlot(m.c, name_vars=c("IV.c","SV.c","IV2.c","IVSV.c","SV2.c"), outcome="selfesteem", data=df, xlab="Individual-level values", ylab="State-level values", zlab="Self-esteem")
|
Visualization
|
https://osf.io/jhyu9/
|
example_Rcode_mlrsa_osf_oneL1pred.R
|
85 |
only keeping latest reported dates for each patient
|
data_lab_res_dcr <- data_lab[!duplicated(data_lab$shcsid,fromLast=TRUE),] data_drug_res_dcr <- data_drug[!duplicated(data_drug$shcsid,fromLast=TRUE),] data_dis_res_dcr <- data_dis[order(data_dis$shcsid,data_dis$newdate),] data_dis_res_dcr <- data_dis_res_dcr[!duplicated(data_dis_res_dcr$shcsid,fromLast=TRUE),]
|
Data Variable
|
https://osf.io/gy5vm/
|
preprocess_SHCS.R
|
86 |
Plots with H(A|M), non-normalized conditional entropy. Getting mean, median, and standard deviation across different motifs per period
|
N <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = length) MEAN <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = mean) MEDIAN <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = median) SD <- aggregate(CEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = sd) resultsmotifs_summary <- cbind.data.frame(N, MEAN$CEAuthoritiesMotifs, MEDIAN$CEAuthoritiesMotifs, SD$CEAuthoritiesMotifs) colnames(resultsmotifs_summary) <- c("DATE","N","MEAN","MEDIAN","SD") resultsmotifs_summary$SE <- resultsmotifs_summary$SD / sqrt(resultsmotifs_summary$N)
|
Visualization
|
https://osf.io/uckzx/
|
P1_motif-by-motif_newbins.R
|
87 |
Plots with H(A|M)/H(A) normalized conditional entropy
|
resultsmotifs <- as.data.frame(rbind(results330,results350,results370,results390,results405,results415,results425, results435,results455,results470,results490,results600))
|
Visualization
|
https://osf.io/uckzx/
|
P1_motif-by-motif_newbins.R
|
88 |
Getting mean, median and standard deviation across different motifs per period
|
N <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = length) MEAN <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = mean) MEDIAN <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = median) SD <- aggregate(NormCEAuthoritiesMotifs ~ DATE, data = resultsmotifs, FUN = sd) Nresultsmotifs_summary <- cbind.data.frame(N, MEAN$NormCEAuthoritiesMotifs, MEDIAN$NormCEAuthoritiesMotifs, SD$NormCEAuthoritiesMotifs) colnames(Nresultsmotifs_summary) <- c("DATE","N","MEAN","MEDIAN","SD") Nresultsmotifs_summary$SE <- Nresultsmotifs_summary$SD / sqrt(Nresultsmotifs_summary$N)
|
Statistical Modeling
|
https://osf.io/uckzx/
|
P1_motif-by-motif_newbins.R
|
89 |
Plot mean and median H(A|M)/H(A) per motif per period
|
require(ggplot2) ggmean <- ggplot(Nresultsmotifs_summary,aes(x=DATE,y=MEAN)) + labs(title = "P1: Mean normalized conditional entropy of authorities given motifs across motifs", x = "Year BCE", y = "mean H(A|M)/H(A) across motifs") + scale_x_reverse() + geom_errorbar(aes(ymin=Nresultsmotifs_summary$MEAN-Nresultsmotifs_summary$SE, ymax=Nresultsmotifs_summary$MEAN+Nresultsmotifs_summary$SE),width=.1) + geom_line() + geom_point() ggmean ggmedian <- ggplot(Nresultsmotifs_summary,aes(x=DATE,y=MEDIAN)) + labs(title = "P1: Median normalized conditional entropy of authorities given motifs across motifs", x = "Year BCE", y = "median H(A|M)/H(A) across motifs") + scale_x_reverse() + geom_line() + geom_point() ggmedian
|
Visualization
|
https://osf.io/uckzx/
|
P1_motif-by-motif_newbins.R
|
90 |
In the renamed "gender" variable, replace "son" with "Male", etc.
|
StataGDIM$gender <- gsub('son', 'Male', StataGDIM$gender) StataGDIM$gender <- gsub('daughter', 'Female', StataGDIM$gender) StataGDIM$gender <- gsub('all', 'Mixed', StataGDIM$gender)
|
Data Variable
|
https://osf.io/pk9my/
|
PNAS_Social_Mobility_public.R
|
91 |
Create cohort (decade of birth) variable in ACE data
|
StataACE <- StataACE %>% mutate(cohort = as.integer(dob_midrange/10)*10)
|
Data Variable
|
https://osf.io/pk9my/
|
PNAS_Social_Mobility_public.R
|
92 |
Create short cohort ID by concatenating two character vectors. cohortIDshort is created to reduce clutter in multi-panel figures
|
ETmerged$cohort_short <- substring(ETmerged$cohort, 3) ETmerged$cohortIDshort <- paste(ETmerged$wbcode, ETmerged$cohort_short, sep = "", collapse = NULL)
|
Data Variable
|
https://osf.io/pk9my/
|
PNAS_Social_Mobility_public.R
|
93 |
Inverse variance weightings for variance of h2, c2 and e2
|
ETmerged<-ETmerged %>% mutate (h2weight = 1/h2var) ETmerged<-ETmerged %>% mutate (c2weight = 1/c2var) ETmerged<-ETmerged %>% mutate (e2weight = 1/e2var)
|
Statistical Modeling
|
https://osf.io/pk9my/
|
PNAS_Social_Mobility_public.R
|
94 |
Manually derive p-values from t-values for Alt_Norway (for a two-sided t-test):
|
2*pt(abs(-1.0320512), df=14,lower.tail=FALSE) 2*pt(abs(2.48367345), df=14,lower.tail=FALSE) 2*pt(abs(-2.828329), df=14,lower.tail=FALSE) 2*pt(abs(1.2811099), df=14,lower.tail=FALSE) 2*pt(abs(3.4124371), df=14,lower.tail=FALSE) 2*pt(abs(0.6433726), df=14,lower.tail=FALSE) 2*pt(abs(-1.0321), df=14,lower.tail=FALSE) 2*pt(abs(2.4837), df=14,lower.tail=FALSE) 2*pt(abs(-2.8283), df=14,lower.tail=FALSE)
|
Statistical Test
|
https://osf.io/pk9my/
|
PNAS_Social_Mobility_public.R
|
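The nine calls all instantiate one formula. A small helper capturing it (the name `p_from_t` is ours, not from the source):

```r
# two-sided p-value from a t statistic with df degrees of freedom
p_from_t <- function(t, df) 2 * pt(abs(t), df = df, lower.tail = FALSE)

p_from_t(-2.8283, df = 14)  # reproduces the corresponding call above
```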
95 |
correlations separately for each partner: partner 1
|
chart.Correlation(data_dyadic[ , c("pes_1","p_self_1","d_self_1","p_other_1","d_other_1")], use = "pairwise.complete.obs", pch = 20, histogram = TRUE)
|
Data Variable
|
https://osf.io/sb3kw/
|
Study3B_analyses.R
|
96 |
Remove" AoI > 1200 or < 50 ms
|
prevalues_gaze <- sum(PAST_O_19$gazedur > 0, na.rm=T)+sum(PAST_M_19$gazedur > 0, na.rm=T)+ sum(PRES_M_19$gazedur > 0, na.rm=T)+sum(PRES_O_19$gazedur > 0, na.rm=T) prevalues_fix <- sum(PAST_O_19$fixdur > 0, na.rm=T)+sum(PAST_M_19$fixdur > 0, na.rm=T)+ sum(PRES_M_19$fixdur > 0, na.rm=T)+sum(PRES_O_19$fixdur > 0, na.rm=T) PAST_M_19$gazedur[PAST_M_19$gazedur > 1200 | PAST_M_19$gazedur < 50] <- NaN PAST_M_19$fixdur[PAST_M_19$fixdur > 1200 | PAST_M_19$fixdur < 50] <- NaN PRES_O_19$gazedur[PRES_O_19$gazedur > 1200 | PRES_O_19$gazedur < 50] <- NaN PRES_O_19$fixdur[PRES_O_19$fixdur > 1200 | PRES_O_19$fixdur < 50] <- NaN PRES_M_19$gazedur[PRES_M_19$gazedur > 1200 | PRES_M_19$gazedur < 50] <- NaN PRES_M_19$fixdur[PRES_M_19$fixdur > 1200 | PRES_M_19$fixdur < 50] <- NaN PAST_O_19$gazedur[PAST_O_19$gazedur > 1200 | PAST_O_19$gazedur < 50] <- NaN PAST_O_19$fixdur[PAST_O_19$fixdur > 1200 | PAST_O_19$fixdur < 50] <- NaN postvalues_gaze <- sum(PAST_O_19$gazedur > 0, na.rm=T)+sum(PAST_M_19$gazedur > 0, na.rm=T)+ sum(PRES_M_19$gazedur > 0, na.rm=T)+sum(PRES_O_19$gazedur > 0, na.rm=T) postvalues_fix <- sum(PAST_O_19$fixdur > 0, na.rm=T)+sum(PAST_M_19$fixdur > 0, na.rm=T)+ sum(PRES_M_19$fixdur > 0, na.rm=T)+sum(PRES_O_19$fixdur > 0, na.rm=T) dataloss_fix <- prevalues_fix-postvalues_fix dataloss_fix_percentage <- (1-(postvalues_fix/prevalues_fix))*100 dataloss_gaze <- prevalues_gaze-postvalues_gaze dataloss_gaze_percentage <- (1-(postvalues_gaze/prevalues_gaze))*100
|
Data Variable
|
https://osf.io/qynhu/
|
subject19.R
|
97 |
Distributional properties of the individual items for the German-speaking sample
|
jmv::descriptives( data = DataGerman, vars = vars(DMW1, DMW2, DMW3, DMW4, SMW1, SMW2, SMW3, SMW4, SBPS1, SBPS2, SBPS3, SBPS4, SBPS5, SBPS6, SBPS7, SBPS8), freq = TRUE, hist = TRUE, violin = TRUE, skew = TRUE, kurt = TRUE)
|
Data Variable
|
https://osf.io/tg3fq/
|
syntax_SDMWS&SBPS.R
|
98 |
Distributional properties of the individual items for the US sample
|
jmv::descriptives( data = DataUS, vars = vars(DMW1, DMW2, DMW3, DMW4, SMW1, SMW2, SMW3, SMW4, SBPS1, SBPS2, SBPS3, SBPS4, SBPS5, SBPS6, SBPS7, SBPS8), freq = TRUE, hist = TRUE, violin = TRUE, skew = TRUE, kurt = TRUE)
|
Data Variable
|
https://osf.io/tg3fq/
|
syntax_SDMWS&SBPS.R
|
99 |
Network analysis using EBICglasso
|
n1<-estimateNetwork(DataAll, default= "EBICglasso") plot(n1, groups = gr, nodeNames = names, legend.cex=.35) centrality_auto(n1, weighted = TRUE, signed = TRUE) centralityPlot(n1, include =c("Betweenness","Closeness", "Strength")) print(n1)
|
Statistical Modeling
|
https://osf.io/tg3fq/
|
syntax_SDMWS&SBPS.R
|
100 |
Network based on correlations
|
n1a<-estimateNetwork(DataAll, default= "cor") plot(n1a, groups = gr, legend.cex=.35)
|
Statistical Modeling
|
https://osf.io/tg3fq/
|
syntax_SDMWS&SBPS.R
|