Dataset schema (one field per line per row): ID (int64, 1–1.07k), Comment (string, 8–1.13k chars), Code (string, 10–4.28k chars), Label (string, 4 classes), Source (string, 21 chars), File (string, 4–82 chars).
301
Gaze duration: construct a maximal lmer() model
AOIKey.GazeDur.max <- lmer(logGazeDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOICohcue.GazeDur.max <- lmer(logGazeDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOISpillover.GazeDur.max <- lmer(logGazeDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE)
Statistical Modeling
https://osf.io/hn3bu/
AnalysisCode.R
302
Regressions out: construct a maximal glmer() model
AOIKey.RegrOut.max <- glmer(IA_REGRESSION_OUT ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, family = "binomial", control = glmerControl(optimizer ="bobyqa")) AOICohcue.RegrOut.max <- glmer(IA_REGRESSION_OUT ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, family = "binomial", control = glmerControl(optimizer ="bobyqa")) AOISpillover.RegrOut.max <- glmer(IA_REGRESSION_OUT ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, family = "binomial", control = glmerControl(optimizer ="bobyqa"))
Statistical Modeling
https://osf.io/hn3bu/
AnalysisCode.R
303
First-fixation duration: construct a maximal lmer() model
AOISpillover.FF.max <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOIWrapUp.FF.max <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE) AOICohcue.FF.max <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE)
Statistical Modeling
https://osf.io/hn3bu/
AnalysisCode.R
304
Creating subsets of coins by time period and obtaining measures for each of the time periods
for (i in dates) { dsub <- (subset(df, df$DATE == i)) motifs <- cbind(dsub[318:681]) denom <- cbind(dsub[256:309]) DATE[i] <- i HDenomination[i] <- entropy(denom) HMotifs[i] <- entropy(motifs) CEDenominationsMotifs[i] <- condentropy(denom, motifs) NormCEDenominationsMotifs[i] <- condentropy(denom, motifs) / entropy(denom) CEMotifsDenominations[i] <- condentropy(motifs, denom) NormCEMotifsDenominations[i] <- condentropy(motifs, denom) / entropy(motifs) MI[i] <- mutinformation(denom, motifs) NDenominations[i] <- length(unique(dsub$DENOMINATION)) NCoins[i] <- length(unique(dsub$ID)) }
Data Variable
https://osf.io/uckzx/
P2_analysis_newbins.R
305
STATISTICAL TESTING: nonparametric Spearman's correlation, non-normalized conditional entropy
cor.test(results$DATE, results$CEDenominationsMotifs, method = "spearman") rdates <- rev(dates) #dates = years BCE plot(results$DATE, results$CEDenominationsMotifs, xlim = c(600,330), xaxt='n', xlab = "YEAR BCE", ylab = "H(D|d)", main = "P2: Conditional entropy of denomination given designs") axis(1, at = rdates, labels = rdates)
Statistical Test
https://osf.io/uckzx/
P2_analysis_newbins.R
306
Plots with H(D|d) (non-normalized conditional entropy): getting the mean, median and standard deviation across different authorities per period for the non-normalized conditional entropy of denomination given designs
N <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = length) MEAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = mean) MEDIAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = median) SD <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = sd) resultspoleis_summary <- cbind.data.frame(N, MEAN$CEDenominationsMotifs, MEDIAN$CEDenominationsMotifs, SD$CEDenominationsMotifs) colnames(resultspoleis_summary) <- c("DATE","N","MEAN","MEDIAN","SD") resultspoleis_summary$SE <- resultspoleis_summary$SD / sqrt(resultspoleis_summary$N)
Visualization
https://osf.io/uckzx/
P2_analysis_newbins.R
307
Plot mean and median H(D|d) across authorities per period
require(ggplot2) ggmean <- ggplot(resultspoleis_summary,aes(x=DATE,y=MEAN)) + labs(title = "P2: Mean conditional entropy of denominations given designs across authorities", x = "Year BCE", y = "mean H(D|d) across authorities") + scale_x_reverse() + geom_errorbar(aes(ymin=resultspoleis_summary$MEAN-resultspoleis_summary$SE, ymax=resultspoleis_summary$MEAN+resultspoleis_summary$SE),width=.1) + geom_line() + geom_point() ggmean ggmedian <- ggplot(resultspoleis_summary,aes(x=DATE,y=MEDIAN)) + labs(title = "P2: Median conditional entropy of denominations given designs across authorities", x = "Year BCE", y = "median H(D|d) across authorities") + scale_x_reverse() + geom_line() + geom_point() ggmedian
Visualization
https://osf.io/uckzx/
P2_analysis_newbins.R
308
REGRESSION ANALYSIS: GROUPING BY AUTHORITIES, non-normalized CE
resultspoleis$AUTHORITIES <- rownames(resultspoleis) resultspoleis$DATE <- as.numeric(as.character(resultspoleis$DATE))
Statistical Modeling
https://osf.io/uckzx/
P2_analysis_newbins.R
309
Mean-imputes missing values for a vector x. NOTE: see http://www.mailarchive.com/[email protected]/msg58289.html Args: x: numeric vector. Returns: x, with missing values replaced by mean(x)
return(replace(x, is.na(x), mean(x, na.rm = T))) } makeConstructMatrix <- function(net, a) {
Data Variable
https://osf.io/2phst/
mse_values_for_density_smoother.R
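Note: the code field above is a fragment (the function header was cut off). A minimal self-contained sketch of the mean-imputation helper described in the comment, with an assumed function name, is:
impute.mean <- function(x) {
  # replace missing entries of a numeric vector with the mean of the observed values
  replace(x, is.na(x), mean(x, na.rm = TRUE))
}
# usage: impute.mean(c(1, NA, 3)) returns c(1, 2, 3)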
310
How many missing data points for the WVSE questions?
sum(is.na(df21$WVSE1)) sum(is.na(df21$WVSE2))
Data Variable
https://osf.io/9jzfr/
20180714Study2analysisscriptextradata.R
311
To log transform Years
TEST_data <- TEST_data %>% mutate(Yearslog = log(Years))
Data Variable
https://osf.io/4g2tu/
custom_functions.R
312
Making factors for reasons based on factor analysis. This makes a new data frame that also has plans
TEST_data_withplans <- dplyr::select( TEST_data, c( 'Hierarchy':'Cooperate', 'plan_materials', 'plan_MS', 'self_MS_prestige', 'self_MS_cooper', 'self_materials_prestige', 'self_materials_cooper' ) )
Data Variable
https://osf.io/4g2tu/
custom_functions.R
313
Word cloud ggplot function
ggwordcloudc <- function(x,y){ freqt<-count(x,y) set.seed(42) ggplot(freqt, aes( label = y, size = n, color=factor(sample.int(10,nrow(x),replace=TRUE)))) + geom_text_wordcloud(area_corr = TRUE) + scale_size_area(max_size = 24) + theme_minimal() }
Visualization
https://osf.io/4g2tu/
custom_functions.R
314
Calculate quantiles for eigenvalues, but only store those from simulated CF model in percentile1
percentile <- apply(parallel$values, 2, function(x) quantile(x, .95)) min <- as.numeric(nrow(obs)) min <- (4 * min) - (min - 1) max <- as.numeric(nrow(obs)) max <- 4 * max percentile1 <- percentile[min:max]
Data Variable
https://osf.io/4g2tu/
custom_functions.R
315
Label the y-axis 'Eigenvalue'
scale_y_continuous(name = 'Eigenvalue') +
Visualization
https://osf.io/4g2tu/
custom_functions.R
316
Label the x-axis 'Factor Number', and ensure that it ranges from 1 to the maximum number of factors, increasing by one with each 'tick' mark.
scale_x_continuous(name = 'Factor Number', breaks = min(eigendat$num):max(eigendat$num)) +
Visualization
https://osf.io/4g2tu/
custom_functions.R
317
Drop rows without FID data and without burrow-distance data
fid <- fid[!is.na(fid$FID),] fid <- fid[!is.na(fid$dist_burrow),]
Data Variable
https://osf.io/3wy58/
bivariate_model_summer_revision.R
318
get probability contours for plot
kd <- ks::kde(plot_data, compute.cont=TRUE) contour_90 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["10%"])[[1]]) contour_90 <- data.frame(contour_90) contour_80 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["20%"])[[1]]) contour_80 <- data.frame(contour_80) contour_70 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["30%"])[[1]]) contour_70 <- data.frame(contour_70) contour_60 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["40%"])[[1]]) contour_60 <- data.frame(contour_60) contour_50 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["50%"])[[1]]) contour_50 <- data.frame(contour_50) contour_40 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["60%"])[[1]]) contour_40 <- data.frame(contour_40) contour_30 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["70%"])[[1]]) contour_30 <- data.frame(contour_30) contour_20 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["80%"])[[1]]) contour_20 <- data.frame(contour_20) contour_10 <- with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]], z=estimate, levels=cont["90%"])[[1]]) contour_10 <- data.frame(contour_10)
Visualization
https://osf.io/3wy58/
bivariate_model_summer_revision.R
319
P-curve: pcurve(m.cor). Funnel plot, symmetry test (Egger's regression), fail-safe N
pdf("Self_control_Funnel.pdf",width=5,height=5) funnel(m.cor,xlab = "Correlation") dev.off() eggers.test(x = m.cor) fsn(yi=m.cor$TE, sei=m.cor$seTE, type="Rosenthal") plot(copas(m.cor)) copas(m.cor)
Visualization
https://osf.io/sqfnt/
Goal
320
H3a: stress ~ trust. Set a Cauchy(0, 1) prior, as stated in the preregistration
prior.coef <- brms::prior(cauchy(0,1),class='b')
Statistical Modeling
https://osf.io/z39us/
Posthoc_H3b.R
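Note: the prior defined above is meant to be passed to a brms model. A hedged usage sketch (the formula follows the comment's "stress ~ trust"; the data frame name, Gaussian family, and fit object name are assumptions) is:
library(brms)
# H3a: regress stress on trust, using the preregistered Cauchy(0, 1) prior on slopes
fit_h3a <- brm(stress ~ trust, data = dat, family = gaussian(), prior = prior.coef)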
321
Fit logistic model to each individual's data to estimate PSEs
jdat$PSE <- 500 for(i in as.numeric(levels(jdat$Subject))){ dat <- subset(jdat, Subject==i) if(dat$cannot_fit[1]==0){ fit.glm <- glm(faster ~ Distort, family=binomial, data=dat) jdat$PSE[jdat$Subject==i] <- -coef(fit.glm)[1]/coef(fit.glm)[2] #PSE is -b0/b1 } }
Statistical Modeling
https://osf.io/wxgm5/
Exp2_judgment.R
322
Linear mixed effects model for graded judgments
mod_full <- lmer(GradedJudge ~ Context*(GMSI_Gen_Z + Order*Distort) + (1|Subject), data=jdat2) summary(mod_full) Anova(mod_full, type=3, test='Chisq')
Statistical Modeling
https://osf.io/wxgm5/
Exp2_judgment.R
323
Custom function for weighted Likert plot
panel <- function(...){ HH::panel.likert(...) vals <- list(...) df <- data.frame(x = vals$x, y = vals$y, groups = vals$groups) grps <- as.character(df$groups) for(i in 1:length(colnames(likertscipop.df))){ grps <- sub(paste0('^', colnames(likertscipop.df)[i]), i, grps) } df <- df[order(df$y,grps),] df$correctX <- ave(df$x, df$y, FUN = function(x){ x[x < 0] <- rev(cumsum(rev(x[x < 0]))) - x[x < 0]/2 x[x > 0] <- cumsum(x[x > 0]) - x[x > 0]/2 return(x) }) subs <- sub(' Positive$', '', df$groups) collapse <- subs[-1] == subs[-length(subs)] & df$y[-1] == df$y[-length(df$y)] df$abs <- abs(df$x) df$abs[c(collapse, F)] <- df$abs[c(collapse, F)] + df$abs[c(F, collapse)] df$correctX[c(collapse, F)] <- 0 df <- df[c(T, !collapse),] df$perc <- round(ave(df$abs, df$y, FUN = function(x){x/sum(x) * 100}), 1) df$perc <- paste0(df$perc,'%') df$perc[df$perc == "0%"] <- "" lattice::panel.text(x = df$correctX, y = df$y, label = df$perc, cex = 1.2, font = 1, col = "white") }
Visualization
https://osf.io/qj4xr/
02_prevalence-of-scipop.R
324
calculate mean and sd in deduplicated data set
means <- c(means, paste0(Round(mean(desc[!duplicated(desc$id), i])), " (", Round(sd(desc[!duplicated(desc$id), i])), ")")) } }
Data Variable
https://osf.io/nxyh3/
02b_Descriptives_Study2.R
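Note: the code field above is only the inner statement of a loop. A sketch of the kind of enclosing loop it implies (the column range, the desc data frame, and the Round() helper are assumptions carried over from the fragment) is:
Round <- function(x) round(x, 2)  # assumed helper; the original Round() is defined elsewhere
means <- c()
for (i in 3:ncol(desc)) {  # assumed range of numeric columns
  # mean (SD) per variable, computed after deduplicating rows by participant id
  means <- c(means, paste0(Round(mean(desc[!duplicated(desc$id), i])),
                           " (", Round(sd(desc[!duplicated(desc$id), i])), ")"))
}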
325
For each person, combine component scores F with loading matrix B and error values E to obtain the latent structure. For each person, standardize the data using the within-person mean and standard deviation
Y = matrix(0,nrow(data),P) colnames(Y) = sprintf("Y%d",seq(1:P)) Error.var = matrix(0,nrow(data),P) colnames(Error.var) = sprintf("E%d",seq(1:P)) FB.var = matrix(0,nrow(data),P) colnames(FB.var) = sprintf("FB%d",seq(1:P)) for (i in 1:N){ n.i = which(data$ID==i) E = sqrt(var.E)*mvrnorm(length(n.i), rep(0,P), diag(P)) Y[n.i,] = F[n.i,] %*% t(B) + E Error.var[n.i,] = E FB.var[n.i,] = F[n.i,] %*% t(B) } data = cbind(data,Y) return(list(Sigma.Psi=NULL,Psi=Psi,Psi.i=NULL,lambda.max.list=NULL,Components=F,data=data, Error.var=Error.var,FB.var=FB.var)) }
Statistical Modeling
https://osf.io/rs6un/
Data.PC.VAR.Fixed.R
326
Statistical analysis: make GramGender a factor with two levels (feminine, masculine)
subset.for.plot$GramGender <- as.factor(subset.for.plot$GramGender)
Data Variable
https://osf.io/przvy/
study1-rscript.R
327
Summary statistics on the reported frequency of the structures
data_struc_names <- as.data.frame(sort(table(data$structures_names))) data_struc_names range(table(data$structures_names)) mean(table(data$structures_names)) sd(table(data$structures_names)) median(table(data$structures_names)) sort(table(data$structures_names))
Data Variable
https://osf.io/fwc2p/
Keuken_etal_UHF_MRI_review_analysis_script.r
328
Exclude subjects who have missing values on the ability test or on self-viewed ability
mst2 <- subset(mst2, (Raven_self != "NA") & (Raven_obj != "NA") )
Data Variable
https://osf.io/m6pb2/
Data_preparation_Sample_A.R
329
DESCRIPTIVE STATISTICS: compute and save sample statistics (age distribution, number of females)
age <- round(select(psych::describe(mst2_descr$age), n, min, max, mean, sd),2) age$n <- nrow(mst2_descr) sampstats <- mutate(age, female=plyr::count(mst2_descr$sex)[plyr::count(mst2_descr$sex)[,1]=="1",]["freq"] ) write.table(sampstats, file="Descriptives/age_sex_Sample_A_mst2.dat", sep="\t", row.names=FALSE)
Data Variable
https://osf.io/m6pb2/
Data_preparation_Sample_A.R
330
Get the estimated variance of tanh^-1(p-hat) (Fisher's z)
dvartanh <- (1-(pe^2))^2 vartanh <- v/dvartanh vartanh
Statistical Modeling
https://osf.io/9jzfr/
metaBigFiveextraversion.R
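Note: the division by dvartanh is the delta-method approximation for the variance of the Fisher-z transform,
\operatorname{Var}\big(\tanh^{-1}\hat{p}\big) \approx \frac{\operatorname{Var}(\hat{p})}{(1-\hat{p}^{2})^{2}},
which is what vartanh <- v / dvartanh computes, with dvartanh = (1 - pe^2)^2.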
331
We have weak invariance between the pre-corona and corona-cohort groups; test for strong invariance: fit the model as a multigroup model
est_s <- cfa(mod_s,quop_use, estimator = "MLR", missing = "FIML", group = "corona", group.equal = c("loadings","intercepts"), cluster = "class")
Statistical Test
https://osf.io/vphyt/
Text_Level.R
332
find the stations that have 90% of data after 2001
Pdata_2000 <- Pdata[Dates >= "2000-01-01",] result <- rep(0,(ncol(Pdata_2000)-3)) for (i in 4:ncol(Pdata_2000)) { result[i-3] <- sum(ifelse(is.na(Pdata_2000[,i]),1,0))/nrow(Pdata_2000) }
Data Variable
https://osf.io/5ezfk/
SantaLuciaStationsToPCPFile.R
333
result indicates the fraction of NA data for the stations; throw out all the columns and rows where result > 0.1
Pdata_new <- Pdata_2000[,-(which(result>0.1)+3)] Stations <- Stations[-which(result>0.1),] sub_b_sp <- SpatialPoints(cbind(Subbasins$Long_, Subbasins$Lat), proj4string=CRS("+proj=longlat +datum=WGS84 +no_defs"))
Data Variable
https://osf.io/5ezfk/
SantaLuciaStationsToPCPFile.R
334
Create dichotomous variable coded as 0 = no, 1 = yes
dat[, pb_investigated_dich := ifelse(pb_investigated == "yes", 1, 0)]
Data Variable
https://osf.io/dqc3y/
calc_vars.R
335
Show t-test, Cohen's d, and corrected p-values + CIs for all tests
idx = 1 for (i in all_groups) { cat("\n") print(sprintf('TEST: %s, %s = %d', equation, grouping, i)) t <- t.test(formula = eval(parse(text = equation)), data = filter(df, .data[[grouping]] == i), paired = TRUE, conf.level = ci_level) print(t) print(sprintf('adjusted pval: %.4f', pvals_adj[idx])) d <- effsize::cohen.d(formula = eval(parse(text = paste0(equation, '| Subject(id)'))), data = filter(df, .data[[grouping]] == i), paired = TRUE) print(d)
Statistical Test
https://osf.io/xgwzf/
utils.R
336
Show Wilcoxon test, Cliff's delta, and corrected p-values for all tests
idx = 1 for (i in all_groups) { print(sprintf('TEST: %s, %s = %d', equation, grouping, i)) w <- wilcox.test(formula = eval(parse(text = equation)), data = filter(df, .data[[grouping]] == i), paired = TRUE) print(w) print(sprintf('adjusted pval: %.4f', pvals_adj[idx])) d <- effsize::cliff.delta(formula = eval(parse(text = equation)), data = filter(df, .data[[grouping]] == i), paired = TRUE) print(d)
Visualization
https://osf.io/xgwzf/
utils.R
337
fit tree with quadratic model
hrstree <- semtree(fitted_quadratic_lgcm, as.data.frame(rndhrs_subset), control=semtree.control(verbose=TRUE, method="naive", missing="party", min.bucket = 500, min.N = 250, exclude.heywood = FALSE))
Statistical Modeling
https://osf.io/3uyjt/
tree.R
338
Then determine which response was given, with an unbiased guessing probability of 1/N
p.guess <- rep(1/n.acc, n.acc) out$R[timer.replace] <- sample(1:n.acc, size=sum(timer.replace), replace=TRUE, prob=p.guess) } out }
Statistical Modeling
https://osf.io/tbczv/
TRDM-functions.r
339
BassAckward EFA We use bassAckward() to get the correlations between factors from successive solutions, so that we can create the hierarchical diagrams. The correlations are contained in the "bass.ack" result (See capture.output below). In multifactor solutions, the factors obtained from fa() and bassAckward() can be matched by their sequential order (i.e., first factor in fa() is the same as the first factor from bassAckward()). Note: BassAckwards is consistent with faCor but not factor.scores
bass.usm=bassAckward(usm, nfactors=7, fm='ml', cut = .45, lr=F, items=F, plot=T) bass.uss=bassAckward(uss, nfactors=8, fm='ml', cut = .45, lr=F, items=F, plot=T) bass.its=bassAckward(its, nfactors=9, fm='ml', cut = .45, lr=F, items=F, plot=T) bassAckward.diagram(bass.its, lr=T, items=F, cut=.6) capture.output(bass.usm[["bass.ack"]],file='bassAck USM.csv') capture.output(bass.uss[["bass.ack"]],file='bassAck USS.csv') capture.output(bass.its[["bass.ack"]],file='bassAck ITS.csv')
Data Variable
https://osf.io/w7afh/
EFA script.R
340
In the loop below, users define the datasets and the number of solutions to obtain, and the script returns the fa() results, the structure matrices with item labels, and factor congruence coefficients across samples. res stores complete fa results; load stores structure matrices; pv stores the percent of variance accounted for by the factors in each solution; fcong stores matrices with factor congruence coefficients
for (i in 1:9) { # i = number of solutions we want for(s in seq_along(dl)) { # for each sample in "dl"... id=c('M','U','I')[s] # letter identifier for each sample: M = Mturk, U = US students, I = Italian students fa1=fa(dl[[s]],nfactors=i,fm='ml') # conduct EFA (oblimin rotation, ML estimation) pv[[s]][[i]]=fa1$Vaccounted[2,] # proportion variance accounted for by each factor colnames(fa1$Structure)=sub('ML',id,colnames(fa1$Structure)) # rename the factor labels according to sample res[[s]][[i]]=fa1 # append fa results to list load[[s]][[i]]=merge(unclass(fa.sort(fa1$Structure)), labels, by='row.names', sort=F) # attach item labels to structure loadings and append to list write.table(rbind(load[[s]][[i]],''), file=paste0("load",s,".csv"), row.names=F, sep=',', append=T) # write matrix of structure loadings to file } fcong[[paste0(i,'_USM_USS')]]=fa.congruence(res[[1]][[i]],res[[2]][[i]],structure=T) # append factor congruence coefficients for usm and uss fcong[[paste0(i,'_USM_ITS')]]=fa.congruence(res[[1]][[i]],res[[3]][[i]],structure=T) fcong[[paste0(i,'_USS_ITS')]]=fa.congruence(res[[2]][[i]],res[[3]][[i]],structure=T) } capture.output(fcong,file='fcong.csv') capture.output(pv,file='pvaccounted.csv') rm(fa1,fcong,load,pv,res,i,s,id) # cleaning afterwards
Statistical Modeling
https://osf.io/w7afh/
EFA script.R
341
One-factor EFA: here, we obtain the correlations among the four factors (obliquely rotated) and use them to find the factor loadings on a general factor. We also compute and save the factor scores for the 4 factors and the general factor. 4-factor EFA:
fa4.usm=fa(usm[,(items),with=F], nfactors=4, fm='ml') fa4.uss=fa(uss[,(items),with=F], nfactors=4, fm='ml') fa4.its=fa(its[,(items),with=F], nfactors=4, fm='ml')
Statistical Modeling
https://osf.io/w7afh/
EFA script.R
342
1-factor EFA using the factor scores (we need these scores in order to obtain the general factor scores)
fa1.usm=fa(usm[,(fnames),with=F], nfactors=1, fm='ml') fa1.uss=fa(uss[,(fnames),with=F], nfactors=1, fm='ml') fa1.its=fa(its[,(fnames),with=F], nfactors=1, fm='ml')
Statistical Modeling
https://osf.io/w7afh/
EFA script.R
343
In case xlim is set to 0 and some value has been given to xrange: center the plot symmetrically on zero, using the maximal extension if xrange is set to 0, and the given range otherwise
if (xlim[1]==0 & !is.null(xrange)) { if (xrange==0) { maxext <- max(abs(min(paramSampleVec)), abs(max(paramSampleVec))) #largest extension into positive or negative range xlim = c(-maxext, maxext) #centers plot symmetrically on zero } else { xlim = c(-xrange, xrange) } }
Visualization
https://osf.io/qy5sd/
plotPostKO.R
344
Compute and save the correlation table of self-rated and objective ability measures and outcome aggregates
varnames <- c("Raven","reasoning_self","MWTB","vocabulary_self","global_selfevaluation","well_being","agency_self","communion_self","agency_peer","communion_peer") outcomes_pils <- dplyr::select(pils, Z_Raven_obj, Z_Raven_self, Z_MWTB_obj, Z_MWTB_self, Z_global_selfeval, Z_well_being, Z_agency_self, Z_comm_self, Z_agency_peer, Z_comm_peer) names(outcomes_pils) <- varnames cor_aggr <- corcons(outcomes_pils) write.table(cor_aggr, file="Descriptives/correlations_aggr_Sample_B_pils.dat", sep="\t")
Statistical Modeling
https://osf.io/m6pb2/
Data_preparation_Sample_B.R
345
Function 5: Stouffer test for a vector of pp-values
stouffer=function(pp) sum(qnorm(pp),na.rm=TRUE)/sqrt(sum(!is.na(pp)))
Statistical Test
https://osf.io/ujpyn/
pcurve_app4.052.r
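Note: the one-liner above implements Stouffer's method,
Z = \frac{\sum_{i} \Phi^{-1}(pp_i)}{\sqrt{k}},
where \Phi^{-1} is the standard normal quantile function (qnorm) and k is the number of non-missing pp-values.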
346
1.4 Create family to turn t -> F and z -> chi2
family=test family=ifelse(test=="t","f",family) family=ifelse(test=="z","c",family)
Statistical Test
https://osf.io/ujpyn/
pcurve_app4.052.r
347
1.9 Take the value after the equal sign (the value of the test statistic) and put it in vector "equal"
equal=abs(as.numeric(substring(raw,eq+1)))
Data Variable
https://osf.io/ujpyn/
pcurve_app4.052.r
348
Compute pp-values for the half p-curve
pp33.half=ifelse(family=="f" & p<.025, (1/prop25)*( pf(value,df1=df1,df2=df2,ncp=ncp33)-(1-prop25)),NA) pp33.half=ifelse(family=="c" & p<.025, (1/prop25)*(pchisq(value,df=df1, ncp=ncp33)-(1-prop25)),pp33.half) pp33.half=pbound(pp33.half)
Statistical Modeling
https://osf.io/ujpyn/
pcurve_app4.052.r
349
remove lower triangle of correlation matrix
else if(removeTriangle[1]=="lower"){ Rnew <- as.matrix(Rnew) Rnew[lower.tri(Rnew, diag = TRUE)] <- "" Rnew <- as.data.frame(Rnew) }
Data Variable
https://osf.io/xhrw6/
corstars.R
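Note: the code field above is only the else-if branch. A minimal sketch of the surrounding upper/lower choice it implies (the "upper" branch is an assumed mirror of the fragment) is:
if (removeTriangle[1] == "upper") {
  Rnew <- as.matrix(Rnew)
  Rnew[upper.tri(Rnew, diag = TRUE)] <- ""  # blank out the upper triangle incl. diagonal
  Rnew <- as.data.frame(Rnew)
} else if (removeTriangle[1] == "lower") {
  Rnew <- as.matrix(Rnew)
  Rnew[lower.tri(Rnew, diag = TRUE)] <- ""  # blank out the lower triangle incl. diagonal
  Rnew <- as.data.frame(Rnew)
}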
350
mean RTs by condition
pretrain_study_rt <- pretrain_study %>% filter(block == 3) %>% group_by(id, condition) %>% summarise(mean_rt = mean(rt)) %>% ungroup()
Data Variable
https://osf.io/xgwzf/
exp3_analysis.R
351
Get descriptive stats (how many / what proportion of participants reach each accuracy level)
pretrain_test_acc %>% group_by(test_rep) %>% summarise(n_80 = sum(mean_acc > 0.8), n_100 = sum(mean_acc == 1), prop_100 = sum(mean_acc == 1) / length(mean_acc), prop_80 = sum(mean_acc > 0.8) / length(mean_acc)) pretrain_test_acc %>% filter(test_rep == max(pretrain_test_acc$test_rep)) %>% summarise(group_mean_acc = mean(mean_acc), group_sd_acc = sd(mean_acc))
Visualization
https://osf.io/xgwzf/
exp3_analysis.R
352
P1 counted as 6 seconds until the sound event, to normalize across trials. P2 counted as from sound onset (6 seconds) plus 9 seconds = 15 seconds. P3 counted as 20 seconds from P2 to almost the end of the trial. Order parameter 'OPA', Fisher-z-transformed order parameter 'FOPA'. P2 processing
OPAmedianP2<-read.csv(file="OPAmedianP2.csv", sep = "", header=FALSE) colnames(OPAmedianP2)<-c(colheaderChronos) DW_OPAmedianP2=add_column(OPAmedianP2, GR, .before = 1) FOPAmedianP2=FisherZ(OPAmedianP2) DW_FOPAmedianP2=add_column(FOPAmedianP2, GR, .before = 1) DL_OPAmedianP2=melt(DW_OPAmedianP2, id=c("GR"), measured=c("colheaderChronos")) DL_FOPAmedianP2=melt(DW_FOPAmedianP2, id=c("GR"), measured=c("colheaderChronos")) colnames(DL_OPAmedianP2)<-c("GrNr", "TrialNr","OPAmedianP2") colnames(DL_FOPAmedianP2)<-c("GrNr", "TrialNr","FOPAmedianP2")
Data Variable
https://osf.io/dzwct/
Fisher_Z_3PERIODS_median.R
353
plotting correlations with qgraph
cor_graph <- qgraph(correlations)
Visualization
https://osf.io/8akru/
workshop_example.R
354
Estimate a regularized logistic nodewise regression network; define where to binarize variables; eLASSO (LASSO with EBIC model selection); listwise deletion of missing values (pairwise not possible for regressions)
Ising_net <- estimateNetwork(data, default = "IsingFit", split = split, missing = "listwise", rule = "OR")
Statistical Modeling
https://osf.io/8akru/
workshop_example.R
355
standardize the variables (to obtain standardized coefficients)
dfAD[,c("S","R","H")] <- data.frame(apply(dfAD[,c("S","R","H")], 2, scale)) dfBE[,c("S","R","H")] <- data.frame(apply(dfBE[,c("S","R","H")], 2, scale)) dfCF[,c("S","R","H")] <- data.frame(apply(dfCF[,c("S","R","H")], 2, scale))
Statistical Modeling
https://osf.io/fbshg/
ComF_SOM_Rcode.R
356
Run logistic regression on each subset, and predict the probability of the outcome for each country at each wave. These data points are used for the meta-analysis
response <- deparse(form[[2]]) data_list <- lapply(1:nrow(sub_data), function(i) { dat <- sub_data$data[[i]] if (!all_na(dat$migrant) && !all_na(dat[[response]]) && nlevels(droplevels(dat$migrant)) > 1) { m <- glm(form, family = binomial(), data = dat) pr <- ggemmeans(m, "migrant") pr$wave <- sub_data$wave[i] pr$country <- sub_data$country[i] pr } else { NA } })
Statistical Modeling
https://osf.io/7wd8e/
06-Trends.R
357
Binomial test for significant improvement in accuracy.
ifelse(rf_probs > .5, 0, 1) %>% sum %>% # count number of continuous responses binom.test(., nrow(pv), p = baseline)
Statistical Test
https://osf.io/x8vyw/
02_random_forest_analysis.R
358
Multiple item parameters and thresholds were in one cell. Split them into separate columns
item.dif <- cbind(item.dif, str_split_fixed(item.dif$orig.parameter, " ", 3)) item.dif <- cbind(item.dif, str_split_fixed(item.dif$orig.threshold, " ", 2)) item.dif2 <- cbind(item.dif2, str_split_fixed(item.dif2$orig.parameter, " ", 3)) item.dif2$Tau3 <- ifelse(item.dif2$item == "CR021Q08", 0.74200, NA) item.dif2 <- cbind(item.dif2, str_split_fixed(item.dif2$orig.threshold, " ", 3))
Data Variable
https://osf.io/8fzns/
1_Get_Item-Params_TR.R
359
Inter-rater reliability: create a data frame of the reliability statistic for each of the 10 pairs of data
interraterReliability <- data.frame(row.names = row.names(interraterData)) for (i in c(1:10)) { interraterReliability[i,1] <- cohen.kappa(cbind(t(interraterData[i ,c(1:55)]), t(interraterData[i ,c(56:110)])))$weighted.kappa }
Data Variable
https://osf.io/2j47e/
Reliability.R
360
fill empty matrix with the counts significant (p < .05) per lag (rows) and per individual (column)
for (j in unique(dataset_imp$Participant)){ tempacf <- acf(dataset_imp[dataset_imp$Participant == j , -c(1,8:13)], lag.max = 60) acfmatrix[,paste0("participant", j)] <- 0 for(i in as.numeric(rownames(acfmatrix))){ tempmatrix <- tempacf$acf[i,,] tempvector <- tempmatrix[upper.tri(tempmatrix, diag = T)] acfmatrix[i,paste0("participant", j)] <- length(which(tempvector > 0.25 | tempvector < -0.25)) } }
Statistical Test
https://osf.io/tfbps/
R_Script_Idiographic_network_analyses.R
361
Create list with partial correlation matrices per window
pcorlist <- list() for (d in seq_len(30)) { pcorlist[[d]] <- pcor(temp_data[temp_data$Time %in% d:(d+Window),c("Happy", "Worrying","Nervous","Act_later_regret","Act_without_thinking","Restless")])$estimate } pcorlist <- rapply(pcorlist,function(x) ifelse(x==0.00000000,0.00000001,x), how = "replace") # will be pruned
Data Variable
https://osf.io/tfbps/
R_Script_Idiographic_network_analyses.R
362
Smooth with (bayesian) logistic regression
(mym<-mean(nus)) d<-list(nus=nus-mym,fur=prob.fur*10000) m<-quap(alist( fur~dbinom(10000,p), logit(p)<-a+b*nus, a~dnorm(0,1), b~dnorm(0,1) ),data=d)
Statistical Modeling
https://osf.io/pvyhe/
Prob_between.R
363
For home (emotional) climate items, recode values of "99" to missing.
mutate_at(vars(contains("EDNh_Emotion")), ~ifelse(. == 99, NA, .)) %>% # (Not sure exactly how this code works, but it does)
Data Variable
https://osf.io/xhrw6/
1_create_composite_measures.R
364
figure <- annotate_figure(figure, top = text_grob(str_c(Q_text[1]), size = 8, color = "black")); figure
ggsave(str_c(ordinal_y[QN],"_random.pdf"),plot=figure,width=12,height=14) return(figure) }
Visualization
https://osf.io/nd9yr/
ordinal_plot_functions.R
365
ordered_logistic stacked plot
N_cat <- max(d[,var_y],na.rm=T) grn <- 250 zLogM <- seq(-5,4,length.out=grn) P <- matrix(NA, nrow=grn, ncol=N_cat) for(i in 1:grn){ P[i,] <- ordered_logistic(fixef['b_X']*zLogM[i],cutpoints) } if(QN==1 | QN==4 | QN==5 | QN==8){ axis_labels <- c('fraction of responses','M-ratio') }else{ axis_labels <- c(' ',' ') } data.frame(P) -> P colnames(P) <- x_lab #str_c("p_",1:N_cat) P$zlogM <- zLogM P$logM <- P$zlogM * 2*sd(log(d$M_cov)) + mean(log(d$M_cov)) P$M <- exp(P$logM) P %>% pivot_longer(cols=x_lab, #starts_with("p_"), values_to="p", names_to="k") %>% mutate(k = as_factor(k), k = fct_rev(k)) %>% ggplot(aes(x=M, y=p, fill=k)) + scale_fill_viridis_d(name="",labels = wrap_format(20), option="rocket") + geom_area(alpha=0.8 , size=0.4, colour="black")+ themeXstack + labs(y=axis_labels[1],x=axis_labels[2])+ scale_y_continuous(breaks=seq(0,1,0.1))+ coord_cartesian(xlim=c(0.5,1.3),ylim=c(0+0.045,1-0.045))+ ggtitle(label=str_wrap(sel_Q_text,22)) -> pl_stack
Visualization
https://osf.io/nd9yr/
ordinal_plot_functions.R
366
calculate threshold for lowest quartile
quantile(tau_squared_self)[2]] tau2_thres_publ <- dat[as_factor(primary_data) == "yes" & k_publ > k_thres, quantile(tau_squared_self_publ)[2]]
Data Variable
https://osf.io/dqc3y/
analysis_MASTER.R
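Note: the first statement in the code field above is truncated. By analogy with the second (data.table) statement, a hypothetical reconstruction (the left-hand variable name and the k_self filter are assumptions) is:
tau2_thres <- dat[as_factor(primary_data) == "yes" & k_self > k_thres,
                  quantile(tau_squared_self)[2]]  # quantile()[2] is the 25% point, i.e. the lowest-quartile threshold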
367
now we add up all corresponding answers to create one row per participant, as in a typical ANOVA design analysis
sdt.agg <- sdt.agg %>% group_by(Participant, type) %>% summarise(count = n()) %>% spread(type, count) sdt.agg #this is a new dataframe for the aggregate data, you can delete after.
Statistical Modeling
https://osf.io/abts4/
DECEPTION-ProbitSDT-v1.R
368
Function pcor2beta gives you betas from a partial correlation/network. Input: pcor, a partial correlation matrix / network. Output: a matrix of betas, where each column corresponds to a dependent variable, so that you can get predicted values by a matrix multiplication of the form betas %*% data
pcor2beta <- function(pcor) { require(psych) require(corpcor) diag(pcor) <- 1 p <- ncol(pcor) betas <- matrix(0, ncol = p, nrow = p) for(i in 1:p) betas[-i,i] <- matReg(y = i, x = seq(p)[-i], C = pcor2cor(pcor))$beta betas[abs(betas) < 1e-13] <- 0 betas }
Statistical Modeling
https://osf.io/ywm3r/
predictability.R
369
extract posterior samples of omega
posterior.omega <- c(posterior.omega.check[, 1], posterior.omega.check[, 2]) posterior.omega.description.only <- c(posterior.omega.check[, 1], posterior.omega.check[, 2]) posterior.omega.description.plus.stats <- c(posterior.omega.check[, 1], posterior.omega.check[, 2])
Statistical Modeling
https://osf.io/x72cy/
AnalyzeDummyData.R
370
MEAN & SD FOR AMOUNT OF REM SLEEP (min) BETWEEN BASELINE AND ISOLATION
aggregate(REM~Place, data=REM_GNS, FUN=mean) aggregate(REM~Place, data=REM_GNS, FUN=sd)
Data Variable
https://osf.io/sx6yf/
2021-6-7_No_Man_Is_An_Island_analyses.R
371
Calculating the sociality bias ratio variable: converting the amount of dream interactions to the same scale on which the amount of wake-time interactions was reported
IN$int_cat<-ifelse(IN$Interactions == 0, 1, ifelse(IN$Interactions >= 1 & IN$Interactions <= 5, 2, ifelse(IN$Interactions >= 6 & IN$Interactions <= 15, 3, ifelse(IN$Interactions >= 16 & IN$Interactions <= 25, 4,5)))) IN$dream_per_sos<-IN$int_cat/IN$sos_int_prev_day hist(IN$dream_per_sos) qqnorm(IN$dream_per_sos) #skewed hist(log(IN$dream_per_sos)) qqnorm(log(IN$dream_per_sos)) #looks more normally distributed after log-transformed
Data Variable
https://osf.io/sx6yf/
2021-6-7_No_Man_Is_An_Island_analyses.R
372
join predicted to test data
test_pred <- left_join(test,prediction,by="id") test_pred <- as.data.frame(test_pred) test_pred$class_var <- as.factor(test_pred$class_var) test_pred$class_var_pred <- as.factor(test_pred$class_var_pred)
Data Variable
https://osf.io/cqsr8/
boosting_xgbDART.R
373
Computing SMD (g) and its variance
smd <- escalc(measure = "SMD", m1i = wm, m2i = mm, sd1i = wsd, sd2i = msd, n1i = wn, n2i = mn, data = smd.means, append = TRUE) smd #Two columns (yi and vi) have been added#
Statistical Modeling
https://osf.io/rbxzs/
Script_R.R
374
Computing Fisher's z and its variance
zcor <- escalc(measure = "ZCOR", ri = cor, ni = sample, data = zcor.correlations, append = TRUE) zcor
Statistical Modeling
https://osf.io/rbxzs/
Script_R.R
375
Obtaining the forest plot
forest(res)
Visualization
https://osf.io/rbxzs/
Script_R.R
376
Analog to ANOVA: If we want to know the mean ES for each 'random' level: yes/no
res_r <- rma(yi = g, vi = var, mods = ~ factor(random)-1, data = dat) res_r
Statistical Test
https://osf.io/rbxzs/
Script_R.R
377
Obtaining a funnel plot (model without moderators)
funnel(res, main = "Random-Effects Model")
Visualization
https://osf.io/rbxzs/
Script_R.R
378
inspect potential multicollinearity using the variance inflation factor (VIF), for the example of the criterion variable outcome_sqd (see Fox, 2016 for a discussion of VIFs and their cutoffs)
lm_sqd <- lm(outcome_sqd ~ X + Y + X2 + XY + Y2, data=df) vif(lm_sqd)
Statistical Modeling
https://osf.io/yvw93/
R_code_test_congruence_effects.R
379
Mixed effects logistic regression
logistic_model_1 <- glmer(instructions ~ distance + (1|ID) + (1|target) + (1|task), data = choices, family = binomial(link = "logit")) logistic_model_2 <- glmer(instructions ~ distance + (1 + distance|ID) + (1|target) + (1|task), data = choices, family = binomial(link = "logit")) logistic_model_3 <- glmer(instructions ~ distance + (1 + distance|ID) + (1|target) + (1 + distance|task), data = choices, family = binomial(link = "logit"), control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))) model_comparison <- anova(logistic_model_1, logistic_model_2, logistic_model_3)
Statistical Modeling
https://osf.io/hbju7/
study-2_main-analysis-code.R
380
GLMMs: results of models with year as continuous are in Table 3. Model with year as a continuous variable
mod1ADI<-lmer(ADImS~LAT+LONG+YEAR1+(1|STATE)+(1|ROUTE2)+(1|SITE)+(1|YEAR1),dat=datUS,REML=F) mod1ADI<-lmer(ADImS~LAT+LONG+YEAR1+(1|SITE)+(1|COUNTRY)+(1|YEAR1),dat=datEU,REML=F)
Statistical Modeling
https://osf.io/jyuxk/
Analysis_and_source_code_table_3_and_figure_3.R
381
familiarity x female x BL check effect of Baseline on familiarity x gender (self) interaction
HR_test_BL = lmer(HR_EMA ~ SI_familiarity_cw * female * HR_BL_cb + female * SI_gender_partner + SIAS_cb + State_SI_Anxiety_cw + SI_count + SI_type_simple + SI_duration_cw + SI_caffeine + SI_nicotin + SI_alcohol + accel_EMA_cw + (1|Participant), data=df_interact) summary(HR_test_BL) Anova(HR_test_BL, type=3) anova(HR_A, HR_test_BL) # no difference RMSSD_test_BL = lmer(ln_RMSSD_EMA ~ SI_familiarity_cw * female * ln_RMSSD_BL_cb + female * SI_gender_partner + SIAS_cb + State_SI_Anxiety_cw + SI_count + SI_type_simple + SI_duration_cw + SI_caffeine + SI_nicotin + SI_alcohol + accel_EMA_cw + (1|Participant), data=df_interact) summary(RMSSD_test_BL) Anova(RMSSD_test_BL, type=3) anova(RMSSD_A, RMSSD_test_BL) # better
Statistical Modeling
https://osf.io/d3tg5/
Manuscript_main analyses.R
382
let's try to create a boxplot for mpg by the number of cylinders (cyl)
ggplot(mtcars, aes(factor(cyl), mpg)) + geom_boxplot()
Visualization
https://osf.io/6g4js/
Graphics_Section_5.R
383
Likelihood Ratio Tests for nested model comparison
pair1_rand_anova <- anova(pair1_rand_full, pair1_rand2, pair1_rand3) # empty model = pair1_rand2 pair1_rand_anova_tidy <- tidy(pair1_rand_anova) pair2_rand_anova <- anova(pair2_rand_full, pair2_rand2, pair2_rand3) pair2_rand_anova_tidy <- tidy(pair2_rand_anova) # create tidy table of model parameters pair2_fixed_anova <- anova(pair2_full, pair2_fixed, pair2_fixed2, pair2_fixed3, pair2_rand2) pair2_fixed_anova_tidy <- tidy(pair2_fixed_anova) # create tidy table of model parameters pair2_fixed_lmer <- tidy(pair2_fixed2) # create tidy table of model parameters group1_rand_anova <- anova(group1_rand_full, group1_rand2, group1_rand3, group1_rand4) # a random intercept for each pair is significantly better than empty, otherwise all other random effects tested were not warranted group1_rand_anova_tidy <- tidy(group1_rand_anova) # create tidy table of model parameters group1_rand_lmer <- tidy(group1_rand3) # create tidy table of model parameters group2_rand_anova <- anova(group2_rand_full, group2_rand2, group2_rand3, group2_rand4, group2_rand5) group2_rand_anova_tidy <- tidy(group2_rand_anova) # create tidy table of model parameters group2_rand_lmer <- tidy(group2_rand2) # create tidy table of model parameters
Statistical Test
https://osf.io/67ncp/
duque_etal_2019_rcode.R
384
Fills in missing values of x with the mean of x Args: x: a numeric vector Returns: x with missing values filled in
return(replace(x, is.na(x), mean(x, na.rm = T))) } mseOfMatchingColumns <- function(nm, mat, dt) {
Data Variable
https://osf.io/2phst/
mse_values_latent_space_diffusion_slurm.R
385
**** 0.3.4) logisticPseudoR2s calculates logistic pseudo-R²s (from Field et al., 2013); input: glm object
logisticPseudoR2s <- function(LogModel) { dev <- LogModel$deviance nullDev <- LogModel$null.deviance modelN <- length(LogModel$fitted.values) R.l <- 1 - dev / nullDev R.cs <- 1- exp ( -(nullDev - dev) / modelN) R.n <- R.cs / ( 1 - ( exp (-(nullDev / modelN)))) outdat <- data.table(R.l = R.l, R.cs = R.cs, R.n = R.n) outdat[, r.sq.print := paste(round(R.l, 3), "/", round(R.cs, 3), "/", round(R.n, 3), sep = "")] return(outdat) }
Statistical Modeling
https://osf.io/dqc3y/
prep_functions.R
386
for loop goes through all the files in myLists and applies addTagsTextEntry to each row in the file
for (k in 1:length(myLists)) { i <- paste("list", k, ".txt", sep = "") o <- paste("upload", k, ".txt", sep = "") f <- read.delim(i, header = FALSE, stringsAsFactors = FALSE) write("[[AdvancedFormat]]", o) apply(f, 1, addMultiChoiceSame, o) } rm(list=ls())
Data Variable
https://osf.io/t2jka/
multipleChoiceSame.R
387
compute mean LT per condition, group and accuracy
LT <- ddply(data, .(rctype,group,acc_lab), summarize, mean.rt = mean(rt), se.lower = mean.rt - se(rt), se.upper = mean.rt + se(rt))
Data Variable
https://osf.io/kdjqz/
Lissonetal2021-script.R
388
Fit uni.cfa and save output in uni.cfa.fit. Use a fixed factor (std.lv = TRUE) and FIML for missing data
uni.cfa.fit = cfa(uni.cfa, data = SSSSdat, std.lv = TRUE, missing = "ML")
Statistical Modeling
https://osf.io/mbf32/
SSSS-Latent Variable Analysis Made Easy (1.0) .R
389
Request summary output from model, including fit indexes, standardized estimates, and R^2/communalities
summary(uni.cfa.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) summary(five.cfa.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) summary(five.sem.parcel.corr.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE) summary(five.sem.parcel.reg.fit, fit.measures = TRUE, standardized = TRUE, rsquare=TRUE)
Statistical Modeling
https://osf.io/mbf32/
SSSS-Latent Variable Analysis Made Easy (1.0) .R
390
We need to get an idea of how many factors are likely needed. Parallel analysis can help. Save output in object "parallel", using the psych fa.parallel() function. Variables 1-25 (the BFI items) are to be analyzed, using maximum likelihood (ml) common factors (fa). Simulate 50 other samples of "garbage factors", using R^2s (SMC) as initial communality estimates, and compare observed eigenvalues to the 95th quantile of the simulated "garbage factor" eigenvalues
parallel = fa.parallel(SSSSdat[1:25], fm = 'ml', fa = 'fa', n.iter = 50, SMC = TRUE, quant = .95)
Statistical Modeling
https://osf.io/mbf32/
SSSS-Latent Variable Analysis Made Easy (1.0) .R
391
Fit measurement invariance models based on five.cfa model, distinguishing by group levels of "gender". Save output in invar.output object
invar.output = measurementInvariance(five.cfa, data = SSSSdat, group = "gender")
Statistical Modeling
https://osf.io/mbf32/
SSSS-Latent Variable Analysis Made Easy (1.0) .R
392
Create function to calculate ICC from a fitted model: ICC = between-family variance / total family variance
calc.icc <- function(y) { sumy <- summary(y) (sumy$varcor$famnumber[1]) / (sumy$varcor$famnumber[1] + sumy$sigma^2) }
Statistical Modeling
https://osf.io/9vn68/
Syntax_Sibpaper1_final_2021-02-12.R
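Note: the function above computes the intraclass correlation
\mathrm{ICC} = \frac{\sigma^{2}_{\text{family}}}{\sigma^{2}_{\text{family}} + \sigma^{2}_{\text{residual}}},
taking the family-level variance component from summary(y)$varcor and the residual variance from sumy$sigma^2.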
393
** 5.2) Poisson regression **** 5.2.1) Full set model 1: summary effect (absolute value)
pred.nbias.1 <- glm(nmeth_bias ~ scale(abs(MA_ES_self)), data = dat[!is.na(nmeth_bias) & k_self > k_thres, ], family = poisson) summary(pred.nbias.1) rsq.pred.nbias.1 <- logisticPseudoR2s(pred.nbias.1)
Statistical Modeling
https://osf.io/dqc3y/
analysis_mmreg.R
394
ANOVA. Dependent variable: drift rate; independent variable: masking time condition. Letters task, pretest
cv_pre_drift = cogito %>% gather(key = "maskingtime", value = "drift", cvt1_v1,cvt1_v2,cvt1_v3,cvt1_v4) %>% convert_as_factor(obs, maskingtime) aov_cv_pre <- anova_test(data = cv_pre_drift , dv = drift, wid = obs, within = maskingtime, type=3, detailed=T) get_anova_table(aov_cv_pre, correction="none")
Data Variable
https://osf.io/5qx7e/
Descriptives_Tables_1_2_S1_S2.R
395
subsample of the PG without patients with schizophrenia or other psychotic disorders (F20. or F23.)
subsample_PGwithoutF20_23 <- subsample_PG[subsample_PG$F20_23==0, ]
Data Variable
https://osf.io/73y8p/
RAQ-R_reliability after exclusion.R
396
Function returns a raster of posterior probabilities for bivariate normal data. x is the unknown tissue of interest and will have two values, one for each isotope; m is a 2D vector, all the values in the raster for each isotope; v is the same as m, but for variances; r is a single number, the covariance (can be a vector if estimated as non-stationary); ras is a raster that will serve as a template for the final product
calcCellProb2D <- function(x,m,v,r,ras) { pd <- 1/(2*pi*sqrt(v[,1])*sqrt(v[,2])*sqrt(1-r^2))*exp(-(1/(2*(1-r^2)))* ((x[1]-m[,1])^2/v[,1]+(x[2]-m[,2])^2/v[,2]-(2*r*(x[1]-m[,1])* (x[2]-m[,2]))/(sqrt(v[,1])*sqrt(v[,2])))) pdras <- setValues(ras,pd) return(pdras) }
Statistical Modeling
https://osf.io/ynx3m/
WoCP_publication_script.R
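Note: the pd expression above evaluates, cell by cell across the raster, the bivariate normal density
f(x_1, x_2) = \frac{1}{2\pi\sigma_1\sigma_2\sqrt{1-r^2}} \exp\!\left[-\frac{1}{2(1-r^2)}\left(\frac{(x_1-\mu_1)^2}{\sigma_1^2} + \frac{(x_2-\mu_2)^2}{\sigma_2^2} - \frac{2r(x_1-\mu_1)(x_2-\mu_2)}{\sigma_1\sigma_2}\right)\right],
with the means \mu and variances \sigma^2 taken from the rasters m and v, and r entering as the correlation term of the standard form.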
397
function returns raster of posterior probability distribution
calcPostProb <- function(x){ pp <- x/cellStats(x,sum) return(pp) }
Data Variable
https://osf.io/ynx3m/
WoCP_publication_script.R
398
reproject the site coordinates to get the site to plot (thanks to P. Schauer)
myproj <- proj4string(origins) #get the CRS string iso.data.sp <- iso.data #make a copy coordinates(iso.data.sp) <- ~ Long + Lat #specify the coordinates proj4string(iso.data.sp) <- CRS("+proj=longlat +datum=WGS84") #project first iso.data.sp <- spTransform(iso.data.sp, myproj) #then re-project using the projection from the base map wrld_simpl2 <- wrld_simpl #make a copy wrld_simpl2 <- spTransform(wrld_simpl2, myproj) #this is to add in country borders etc. if you want to as they have the same projection issues as the site coords
Visualization
https://osf.io/ynx3m/
WoCP_publication_script.R
399
Cutting the dendrogram into 3 clusters
clus3 = cutree(hcOSrenamel, 3) Women_WOCP_cluster<-plot(as.phylo(hcOSrenamel), type = "fan", tip.color = cbbPalette[clus3]) Women_WOCP_cluster
Data Variable
https://osf.io/ynx3m/
WoCP_publication_script.R
400
Calculate intercorrelations between the diamonds variables on the between-person level, with 95% bootstrapped CIs
between.person.diamonds = diamonds %>% group_by(user_id) %>% summarise(across(where(is.numeric), ~ mean(.x))) between.person.diamonds = between.person.diamonds %>% dplyr::select(diamonds_duty, diamonds_intellect, diamonds_adversity, diamonds_mating, diamonds_positivity, diamonds_negativity, diamonds_deception, diamonds_sociality) cors.diamonds = round(cor(between.person.diamonds, method = "pearson"),2) cors.diamonds = as.data.frame(cors.diamonds)
Statistical Test
https://osf.io/b7krz/
Descriptives_Selfreports.R