Dataset schema:
ID: int64, values 1 to 1.07k
Comment: string, lengths 8 to 1.13k
Code: string, lengths 10 to 4.28k
Label: string, 4 classes
Source: string, length 21
File: string, lengths 4 to 82
401
Analysis: Linear Probability Model on Impact Factor
fit_ev_if <- lm(X1.year.Impact.Factor ~ diff_ev + factor(year), data=FullDataset) summary(fit_ev_if) # show results fit_ex_if <- lm(X1.year.Impact.Factor ~ diff_ex + factor(year), data=FullDataset) summary(fit_ex_if) # show results
Statistical Modeling
https://osf.io/jh47m/
Gatekeeper analysis.r
402
Tobit Model as robustness check: assign value 0 to Impact Factors for unpublished studies
FullDataset$X1.year.Impact.Factor[FullDataset$published == "No"] = 0 tobit_ev_if <- censReg( X1.year.Impact.Factor ~ diff_ev + factor(year), data = FullDataset, left = 0) summary(tobit_ev_if) tobit_ex_if <- censReg( X1.year.Impact.Factor ~ diff_ex + factor(year), data = FullDataset, left = 0) summary(tobit_ex_if)
Statistical Modeling
https://osf.io/jh47m/
Gatekeeper analysis.r
403
define standard error function
stderr <- function(x, na.rm=FALSE) { if (na.rm) x <- na.omit(x) sqrt(var(x)/length(x)) }
Data Variable
https://osf.io/wyrav/
AnalysisScript-ResearchQuestion1.R
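A minimal usage sketch for the stderr() helper above; the vector x is hypothetical:
x <- c(2.1, 3.4, 2.8, NA, 3.0)
stderr(x, na.rm = TRUE)  # sqrt(var(x)/length(x)) over the four non-missing values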
404
2019 data: z-standardize mood scores for each person
dat2019_scaled <- dat2019_complete %>% dplyr::select(identity_id, country, doy, mood, weekday) %>% group_by(identity_id) %>% mutate_at(vars(-identity_id,-doy,-country, -weekday), scale)
Data Variable
https://osf.io/wyrav/
AnalysisScript-ResearchQuestion1.R
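Note: mutate_at() is superseded in current dplyr; a hedged equivalent sketch using across() (assuming dplyr >= 1.0.0) would be:
dat2019_scaled <- dat2019_complete %>%
  dplyr::select(identity_id, country, doy, mood, weekday) %>%
  group_by(identity_id) %>%
  mutate(across(-c(doy, country, weekday), scale))  # the grouping column identity_id is excluded automatically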
405
Run regression analyses and save as table (make one table with mood and depression; see depression section)
m1_het <- lm(mood_change ~ gender + age + education_level + race, data = dat_het_prepost) m2_het <- lm(depression_change ~ gender + age + education_level + race, data = dat_het_prepost) tab_model(m1_het, m2_het, show.est = F, show.std = T, digits = 3, file="Mood_Depression_PrePost_Demog_std.doc")
Statistical Modeling
https://osf.io/wyrav/
AnalysisScript-ResearchQuestion1.R
406
Run multilevel model analyses predicting depression scores from the dummy-coded month variable
dat2020_complete$month <- as.factor(dat2020_complete$month) summary(m1 <- lmer(depression ~ month + (1|identity_id), data = subset(dat2020_complete, country == "United States"))) summary(m2 <- lmer(depression ~ month + (1|identity_id), data = subset(dat2020_complete, country == "Germany"))) summary(m3 <- lmer(depression ~ month + (1|identity_id), data = subset(dat2020_complete, country == "United Kingdom"))) tab_model(m1,m2,m3, show.est = F, show.std = T, digits = 3, file="Depression_2020_Months.doc")
Statistical Modeling
https://osf.io/wyrav/
AnalysisScript-ResearchQuestion1.R
407
Step 2: Create one-hot encodings (dummy variables)
testData3 <- predict(dummies_model, testData2)
Data Variable
https://osf.io/wyrav/
ThesisMLROCCode.R
408
Function to generate output CSV files for a list of response IDs. Inputs: dataset, list of response IDs. Outputs: CSV files named with response IDs.
multi_response <- function(dataset, IDs_list) { for (i in IDs_list) { participant_response <- single_response(dataset, i) csv_name <- str_c(i, ".csv", sep = "") write.csv(participant_response, csv_name) } }
Visualization
https://osf.io/3bn9u/
4_4_identification.R
409
Means and SDs of proportional looking time to the novel object in the preferential looking phase, condition 1
mean(dfX_base$PrefLook_LT_Object_Nov_PROP[which(dfX_base$Condition == "Con1")], na.rm = TRUE)
Data Variable
https://osf.io/yfegm/
PREPROC_script_Experiment1.r
410
calculate mode of a vector
mode.knn = function(x){ uniq.x = unique(x) uniq.x = uniq.x[which(!is.na(uniq.x))] knn = uniq.x[which.max(tabulate(match(x, uniq.x)))] return(knn) }
Statistical Modeling
https://osf.io/b7krz/
helper_variables.R
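A quick check of mode.knn() on a toy vector (hypothetical values); ties are broken by first occurrence in unique(x):
v <- c(1, 2, 2, NA, 3, 2)
mode.knn(v)  # returns 2, the most frequent non-NA value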
411
find k nearest neighbors and impute by their mode
impute.knn = function(y, k){ t = which(is.na(y)) if(length(t) == 0){ return(y) }else{ is = 1:length(t) for(i in is){ if(i > k){ look.at = y[(t[i]-k):(t[i]+k)] y[t[i]] = mode.knn(look.at) } if(i <= k){ look.at = y[1:(t[i]+k)] y[t[i]] = mode.knn(look.at) } } return(y) } }
Statistical Modeling
https://osf.io/b7krz/
helper_variables.R
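A small end-to-end sketch combining the two helpers above (toy values). Note that the i <= k branch uses a prefix window y[1:(t[i]+k)] for the first k missing values rather than a symmetric one:
y <- c(1, 1, NA, 1, 2, NA, 2, 2)
impute.knn(y, k = 1)  # -> 1 1 1 1 2 2 2 2; first NA filled from the prefix window, second from y[5:7]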
412
Get categorical data information, then look up the variables in the process in categorical_data so we can get the number of categories for each variable in the process; this would be achieved by categories[cat_indices[i]] for variable i in the process.
categories <- as.integer(mplus.get.group.attribute(file,'categorical_data','categories')) catvars <- mplus.get.group.attribute(file,'categorical_data','var_names') vartypes <- as.integer(mplus.get.group.attribute(file,'categorical_data','vtype')) if (series) {
Data Variable
https://osf.io/nxyh3/
mplus.R
413
get indices and names of the variables in the series
var_indices <- mplus.get.group.attribute(file,cstr2,'var_indices') var_names <- mplus.get.group.attribute(file,cstr2,'var_names') cat_indices <- as.integer(pmatch(var_names, catvars, nomatch=0))
Data Variable
https://osf.io/nxyh3/
mplus.R
414
extract correlations between self-reported scores (observational level!)
self.reported.scores = c("duty", "intellect", "mating", "positivity", "sociality") l = "mating" for(i in self.reported.scores[1:length(self.reported.scores)-1]){ data_phi = ftable(data[,c(paste0("diamonds_", l), paste0("diamonds_", self.reported.scores[which(self.reported.scores == i)+1]))]) print(paste(l, "and", self.reported.scores[which(self.reported.scores == i)+1])) print(phi(data_phi)) }
Data Variable
https://osf.io/b7krz/
02_IML_LASSO_FeatImp.R
415
generate unique study identifier, format and annotate data
d$id <- paste0(d$First_Author,", ", d$Year) d$Year <- as.numeric(substr(d$Year,1,4)) d$ids <- NA for(i in d$id) d[d$id == i,"ids"] <- 1:dim(d[d$id == i,"ids"])[1] d$ids <- as.character(d$ids) d$Age <- (as.numeric(d$Age_M) - 30) / 20 d$energy_renew <- d$energy_renew1 + d$energy_renew2 find_cCode <- Vectorize(function(i) which(unlist(Map(function(x) sum(d[i,grepl("SVS",names(d))] - f[x,grepl("SVS",names(f))]), 1:dim(f)[1])) == 0)) d$Ccode <- f$Ccode[find_cCode(1:dim(d)[1])]
Data Variable
https://osf.io/qxf5t/
TSST_Meta.R
416
to make sure that effects are tested from complex to simple, we reverse the order of the vector containing the to-be-tested effects:
effects_to_test = rev(effects_to_test)
Data Variable
https://osf.io/dpkyb/
create_model_formulas.R
417
combine model matrix and data to include participant id and DV:
new_data = data.frame(dplyr::select(data, contains(c(group, DV_variables, by))), model_matrix)
Data Variable
https://osf.io/dpkyb/
create_model_formulas.R
418
check if retention is a number from 0 to 1
if (retention < 0 || retention > 1) { stop('Retention value is not a number from 0 to 1.') }
Data Variable
https://osf.io/a9bv6/
sanet_2.R
419
construct a maximal glmer() model. This model contains a code for the Ambiguity effect, plus random effects by participants and items.
Acc.Modality.max <- glmer(Correct ~ 1 + Ambiguity.code + (1 + Ambiguity.code | ï..ID) + (1 | Item), data = Data.DisambTask.AmbvsUnamb, family = "binomial", control = glmerControl(optimizer ="bobyqa"))
Statistical Modeling
https://osf.io/m87vg/
Exp2_BehaviouralAnalyses_Code.R
420
create a histogram of the residuals
hist(rawResiduals) hist(invResiduals) hist(logResiduals)
Visualization
https://osf.io/m87vg/
Exp2_BehaviouralAnalyses_Code.R
421
construct a maximal lmer() model. This model contains a fixed within-subjects effect of Ambiguity (effect-coded, with 0.5 = ambiguous), plus random effects by participants and items.
RT.max <- lmer(logRT ~ 1 + Ambiguity.code + (1 + Ambiguity.code | ï..ID) + (1 | Item), data = Data.DisambTask.AmbvsUnamb, REML=FALSE)
Statistical Modeling
https://osf.io/m87vg/
Exp2_BehaviouralAnalyses_Code.R
422
construct a maximal glmer() model. This model contains codes for Modality effects, plus random effects by participants and items.
Acc.Modality.max <- glmer(Correct ~ 1 + Modality.code1 + Modality.code2 + (1 + Modality.code1 + Modality.code2 | ï..ID) + (1 | Item), data = Data.DisambTask.Amb, family = "binomial", control = glmerControl(optimizer ="bobyqa")) Acc.Modality.max <- glmer(True.Positive ~ 1 + Modality.code2 + (1 + Modality.code2 | ID) + (1 | Item), data = Data.ListAndRead, family = "binomial", control = glmerControl(optimizer ="bobyqa")) Acc.Modality.max <- glmer(True.Positive ~ 1 + Modality.code3 + (1 + Modality.code3 | ID) + (1 | Item), data = Data.ListAndRSVP, family = "binomial", control = glmerControl(optimizer ="bobyqa")) Acc.Modality.max <- glmer(True.Positive ~ 1 + Modality.code3 + (1 + Modality.code3 | ID) + (1 | Item), data = Data.ReadAndRSVP, family = "binomial", control = glmerControl(optimizer ="bobyqa"))
Statistical Modeling
https://osf.io/m87vg/
Exp2_BehaviouralAnalyses_Code.R
423
construct a maximal glmer() model. This model contains codes for run effects, plus random effects by participants and items.
Acc.Run.max <- glmer(Correct ~ 1 + Run.code1 + Run.code2 + (1 + Run.code1 + Run.code2 | ï..ID) + (1 | Item), data = Data.DisambTask.Amb, family = "binomial", control = glmerControl(optimizer ="bobyqa")) Acc.Run.max <- glmer(True.Positive ~ 1 + Run.code1 + Run.code2 + (1 + Run.code1 + Run.code2 | ID) + (1 | Item), data = Data.RecMem, family = "binomial", control = glmerControl(optimizer ="bobyqa"))
Statistical Modeling
https://osf.io/m87vg/
Exp2_BehaviouralAnalyses_Code.R
424
run a one-sample t-test comparing accuracy to chance level (0.5)
t.test(Data.List$True.Positive, mu=0.5) t.test(Data.Read$True.Positive, mu=0.5) t.test(Data.RSVP$True.Positive, mu=0.5)
Statistical Test
https://osf.io/m87vg/
Exp2_BehaviouralAnalyses_Code.R
425
abbreviate first names to match with abbreviations in publication list
stats_persons <- lapply(stats_persons, function(x){ x[2]<-str_sub(x[2], 1, 1) x }) stats_persons <- lapply(stats_persons, function(x)paste(x[1], x[2], sep = ","))
Data Variable
https://osf.io/rf6zu/
scrape_web_pages.R
426
Load the data
load('../Relative Effectiveness Data - final.Rdata')
Data Variable
https://osf.io/3aryn/
6speciesismgraphs.R
427
Print a txt and csv file with the results. @param stats_to_print the tibble/table to print. @param name_file name of the file to write. @return 2 files with the results (txt and csv)
print_result <- function(stats_to_print, name_file){ knitr::kable( stats_to_print, format = "rst") %>% cat( file = here('results', str_c(name_file, '.txt', sep = "")), sep = "\n") stats_to_print %>% write_csv(file = here('results', str_c(name_file, '.csv', sep = ""))) }
Visualization
https://osf.io/4fvwe/
print_result.R
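A hedged usage sketch for print_result(); the data frame and file stem are hypothetical, and it assumes the packages used in the entry above (knitr, here, readr, stringr) are loaded and a results/ directory exists under the project root:
stats <- data.frame(term = c("intercept", "slope"), estimate = c(0.12, 1.98))
print_result(stats, "model_estimates")  # writes results/model_estimates.txt and results/model_estimates.csv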
428
convert degrees of freedom into numeric variables and store in new variable
ScienceStatus$df.numerator.value = as.numeric(as.character(ScienceStatus$df.numerator)) ScienceStatus$df.denominator.value = as.numeric(as.character(ScienceStatus$df.denominator))
Data Variable
https://osf.io/he8mu/
Study2_Load_Analysis_Post_Review_11-14-16_Final.R
429
Assigning numeric values to variable labels which were stored in CSV
ScienceStatus$coding.difficulty_r = revalue(ScienceStatus$coding.difficulty, c("Very Easy"="1", "Moderately Easy"="2", "Slightly Easy"="3", "Neither Difficult nor Easy"="4", "Slightly Difficult"="5", "Moderately Difficult"="6", "Very Difficult"="7")) ScienceStatus$coding.difficulty_r = as.numeric(as.character(ScienceStatus$coding.difficulty_r))
Data Variable
https://osf.io/he8mu/
Study2_Load_Analysis_Post_Review_11-14-16_Final.R
430
Calculate EXACT p-value from the stats reported in the paper (post hoc)
ScienceStatus_J$Calc.Pvalue<-(rowSums(cbind(ScienceStatus_J$T.pvalue_calc, ScienceStatus_J$F.pvalue_calc, ScienceStatus_J$rg.pvalue_calc, ScienceStatus_J$r.pvalue_calc,ScienceStatus_J$chi.pvalue_calc), na.rm = TRUE) + ifelse(is.na(ScienceStatus_J$T.pvalue_calc) & is.na(ScienceStatus_J$F.pvalue_calc) & is.na(ScienceStatus_J$rg.pvalue_calc) & is.na(ScienceStatus_J$r.pvalue_calc) & is.na(ScienceStatus_J$chi.pvalue_calc), NA, 0))
Statistical Test
https://osf.io/he8mu/
Study2_Load_Analysis_Post_Review_11-14-16_Final.R
431
P-Curve Graph Calculated from statistic and DF, By Paper
P.By.Paper<-ddply(ScienceStatus_SP, .(article.id,yearcat), summarize, Pmean.calc = stouffer.P(Calc.Pvalue.Clean), PMedian.calc = median(Calc.Pvalue.Clean))
Visualization
https://osf.io/he8mu/
Study2_Load_Analysis_Post_Review_11-14-16_Final.R
432
R-Index: Calculate Mean, Median, Peak
Rindex.Reults<-BCa.Boot.CI(ScienceStatus_SP,Calc.Z,r.index.calc.boot,LogT=FALSE,splitter=yearcat, StatType="AR") Rindex.Reults
Statistical Modeling
https://osf.io/he8mu/
Study2_Load_Analysis_Post_Review_11-14-16_Final.R
433
FIGURE 3: Length in phonemes (RD). Fit a model with fixed effects and with the random effect of subjects on intercepts and the slopes for length
Cue.Subj.lmer = glmer(ACC ~ CueCondition + zLengthPh + zFreq + (1+zLengthPh|Subject), data = NamingData, family = "binomial", control = glmerControl(optimizer="bobyqa")) summary(Cue.Subj.lmer)
Statistical Modeling
https://osf.io/bfq39/
Code_LMMs_Code_BestPractice_Example.R
434
extract fitted values and align with original data for plotting
a<-fitted.values(Cue.Subj.lmer) length(a) Dat<-na.omit(NamingData) #remove NAs from original data so aligns with model values Dat$fitted<-a names(Dat)
Visualization
https://osf.io/bfq39/
Code_LMMs_Code_BestPractice_Example.R
435
FIGURE 4a: Plot average slope for the effect of Frequency
p <- ggplot(data=Dat,aes(x=zFreq, y=fitted)) p <- p + geom_smooth(method = "glm") p <- p + xlab("Frequency (z score)") + ylab("Accuracy (model fit)") p <- p + ggtitle("4a) Average (group) effect of Frequency") p <- p + ylim(0,1) p <- p + theme_bw() + theme(text=element_text(size=12)) p
Visualization
https://osf.io/bfq39/
Code_LMMs_Code_BestPractice_Example.R
436
Main effects model for predicting inversion questions during alignment sessions.
inversion_main_effects_model<-glmer(inversion_dv ~ prime_type + WMC + modality + inversion_production_pre + (1 + prime_type + trial_order | subject) + (1|verb), priming, family="binomial"(link="logit"),glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000))) summary(inversion_main_effects_model)
Statistical Modeling
https://osf.io/f3qrh/
Kim Skalicky and Jung - R syntax.R
437
Odds ratios for main effects model
inversion.me.CI<-confint(inversion_main_effects_model, parm="beta_", level=0.90, method="Wald") inversion.me.tab <- cbind(est = fixef(inversion_main_effects_model),inversion.me.CI) inversion.me.tab <- exp(inversion.me.tab) inversion.me.tab <- as.data.frame(inversion.me.tab) inversion.me.tab <- rownames_to_column(inversion.me.tab) inversion.me.tab
Statistical Modeling
https://osf.io/f3qrh/
Kim Skalicky and Jung - R syntax.R
438
Visualize the interaction between prime type and modality; save the specific effect to a variable
effect2 <-effect("prime_type*modality",inversion_full_model_sig_int_only) summary(effect2) plot(effect2)
Visualization
https://osf.io/f3qrh/
Kim Skalicky and Jung - R syntax.R
439
post hoc analyses to compare all levels of test order between groups (indirect production main effect only)
ind_prod_me_between_groups <- emmeans(indirect_production_me,c("test_order"), type = "response") pairs(ind_prod_me_between_groups, reverse = F, type = 'response', adjust = 'none') plot(ind_prod_me_between_groups)
Statistical Test
https://osf.io/f3qrh/
Kim Skalicky and Jung - R syntax.R
440
Test MANOVA assumptions: test whether residuals are normally distributed
df$pc1.residuals = lm(pc1~condition.socaccount, data=df)$residuals df$pc2.residuals = lm(pc2~condition.socaccount, data=df)$residuals df$pc3.residuals = lm(pc3~condition.socaccount, data=df)$residuals df$pc4.residuals = lm(pc4~condition.socaccount, data=df)$residuals shapiro.test(df$pc1.residuals) shapiro.test(df$pc2.residuals) shapiro.test(df$pc3.residuals) shapiro.test(df$pc4.residuals)
Statistical Test
https://osf.io/qj86m/
9_manova_fda_socaccount.R
441
Only include tweets with at least 3 words and a Brysbaert score of at least 0.0001
final_twitter_data_ex <- final_twitter_data %>% filter(wc>=3) final_twitter_data_ex <- final_twitter_data_ex %>% filter(bryscore>=0.0001)
Data Variable
https://osf.io/qxwsz/
anxiety_abstraction_s2.R
442
Means and SDs in fear tweets and anxiety tweets
final_twitter_data_ex %>% group_by(fearVSanx) %>% summarise_at(vars(bryscore,i,we,they,focusfuture), list(mean=mean, sd=sd))
Data Variable
https://osf.io/qxwsz/
anxiety_abstraction_s2.R
443
Independent-samples t-test: Brysbaert concreteness score as DV
t.test_bci <- final_twitter_data_ex %>% rstatix::t_test(bryscore ~ fearVSanx, var.equal = TRUE, detailed=TRUE) %>% rstatix::add_significance() t.test_bci
Statistical Test
https://osf.io/qxwsz/
anxiety_abstraction_s2.R
444
convert age from months to years
demog.tab$interview_age <- demog.tab$interview_age / 12
Data Variable
https://osf.io/5y27d/
load_all_tables.R
445
multinomial CI (these are simultaneous CIs, although they assume full pooling)
obs_p_CI <- DescTools::MultinomCI(pl_d$N, conf.level=1-0.05/2, method="goodman") *4 pl_d$obs_p_lb <- obs_p_CI[,2] pl_d$obs_p_ub <- obs_p_CI[,3] pl_d %>% mutate(resp_true = ifelse(k>=4,1,0), fill_group = str_c(item, resp_true, type), line_group = str_c(item, type), item = factor(item, levels=c("true","false"))) %>% ggplot(aes(x=k,y=obs_p))+ facet_grid(item ~ type) + nice_theme + geom_col(aes(fill=fill_group))+ geom_errorbar(aes(y=obs_p,ymin=obs_p_lb, ymax=obs_p_ub),color="dark grey",width=0,lwd=0.8)+
Statistical Modeling
https://osf.io/nd9yr/
make_fig1_main_text.R
446
M-ratio posterior density plot
log_mu_sci <- unlist(c(output[,grep('mu_logMratio_1',varnames(output))])) log_mu_cov <- unlist(c(output[,grep('mu_logMratio_2',varnames(output))])) d_groupM <- data.frame(logM =c(log_mu_sci,log_mu_cov), type=c(rep('science',length(log_mu_sci)),rep('Covid-19',length(log_mu_sci)))) d_groupM %>% mutate(M=exp(logM)) %>% ggplot(aes(x=M,y=type,fill=type))+ nice_theme + geom_vline(xintercept = 1, lty=2,size=0.4)+ stat_halfeye(.width = c(.95, .95),aes(fill=type),alpha=0.8)+
Visualization
https://osf.io/nd9yr/
make_fig1_main_text.R
447
Convert `campaign_week` from a character string to a factor with 12 levels (Week 1 through Week 11, plus Election Day) to order weeks chronologically.
Facebook_Users$campaign_week <- factor(Facebook_Users$campaign_week, levels = c("Week 1", "Week 2", "Week 3", "Week 4", "Week 5", "Week 6", "Week 7", "Week 8", "Week 9", "Week 10", "Week 11", "Election Day"))
Data Variable
https://osf.io/3fnjq/
facebook_users.R
448
Convert `candidate_page` from a character string to a factor with 3 levels (facilitates data visualization and data exploration).
Facebook_Users$candidate_page <- factor(Facebook_Users$candidate_page, levels = c ("Harper", "Trudeau", "Mulcair"))
Visualization
https://osf.io/3fnjq/
facebook_users.R
449
Adds a new column to identify the Facebook page being `liked`.
Harper_User_Likes$partisanship <- "Conservative" Trudeau_User_Likes$partisanship <- "Liberal" Mulcair_User_Likes$partisanship <- "Social Democrat"
Data Variable
https://osf.io/3fnjq/
facebook_users.R
450
Match partisan assignment from Facebook_User_Likes to the Facebook Users Dataset using `user_id`.
Facebook_Users$partisanship <- NA Facebook_Users$partisanship <- Facebook_User_Likes$partisanship[match(Facebook_Users$user_id, Facebook_User_Likes$user_id)]
Data Variable
https://osf.io/3fnjq/
facebook_users.R
451
Convert `partisanship` from a character string to a factor with 3 levels (facilitates data visualization and data exploration).
Facebook_Users$partisanship <- factor(Facebook_Users$partisanship, levels = c ("Conservative", "Liberal", "Social Democrat"))
Data Variable
https://osf.io/3fnjq/
facebook_users.R
452
split the data and transform the ratings into numeric values
ARTE <- as.numeric(data[which(data$TV.Station == 'ARTE'),]$Rating) Degeto <- as.numeric(data[which(data$TV.Station == 'Degeto Film'),]$Rating)
Data Variable
https://osf.io/8fsbd/
IMDB_analysis.r
453
run the one-sided t-test with alpha = 0.1
t.test(ARTE, Degeto, alternative = "greater", conf.level = 0.90)
Statistical Test
https://osf.io/8fsbd/
IMDB_analysis.r
454
Create list of dataframes per country
countries <- unique(data8$cntry_full) index = 0 listofdfs <- list()
Data Variable
https://osf.io/k853j/
ESS_openness_2016_perCountry.R
455
Draw correlation plots
char.col<-c("#228822","#663388","#006688") groupn<-c(4,11,6,3) left<--8 top<-25 corrplot.mixed(cortabNB, upper = "ellipse",tl.pos="lt",tl.col=rep(char.col,groupn[1:3]),number.cex = .7) text(left,top,"Non-biological\nfathers",cex=1.2,pos=4) nc<-0.5 w<-16 tiff("Figure_S3_corr1.tif",width=w,height=2*w,units="cm",res=600,compression="lzw") par(mfrow=c(2,1)) corrplot.mixed(cortabNB, upper = "ellipse",tl.pos="lt",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,"Non-biological\nfathers",cex=1.2,pos=4) corrplot.mixed(cortabPA, upper = "ellipse",tl.pos="lt",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,"Partners",cex=1.2,pos=4) dev.off() tiff("Figure_S4_corr2.tif",width=w,height=2*w,units="cm",res=600,compression="lzw") par(mfrow=c(2,1)) corrplot.mixed(cortabLO, upper = "ellipse",tl.pos="lt",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,"Sensitive\nperiod\nlog-odds",cex=1.2,pos=4) corrplot.mixed(cortabD, upper = "ellipse",tl.pos="lt",number.cex = nc,tl.col=rep(char.col,groupn[1:3])) text(left,top,"Relative\nsimilarity\nt value",cex=1.2,pos=4) dev.off()
Visualization
https://osf.io/greqt/
07_correlation_structure.R
456
Function to calculate proportion of studies within each time period, for continuous variables that were transformed into a cutoff. x: character string of the variable name. name: character string of the name that will appear in the table. time: character string of the time period variable. method: either "chisq" or "fisher", depending on the type of test wanted.
CAT<-function(x, name, time, method) { tabtot <- table(DATA[,x]) sumtot <- sum(tabtot) tabtime <- table(DATA[,x], DATA[,time]) sumtime <- apply(tabtime, 2, sum) proptot <- tabtot[2]/sum(tabtot) CItot <- exactci(tabtot[2], sum(tabtot), conf.level = 0.95) proptime <- tabtime[2,]/sumtime CItime <- map2(tabtime[2,], sumtime, function(x, y){exactci(x, y, conf.level = 0.95)}) propCItime <- map2(proptime, CItime, function(x,y){paste0(round(x*100,0)," (", round(y[["conf.int"]][1]*100,0),"-", round(y[["conf.int"]][2]*100,0),")")}) if(method == "fisher"){ p <- fisher.test(tabtime) } else if(method == "chisq"){ p<-chisq.test(tabtime) } N_propCItime <- NULL for(i in 1:length(propCItime)){ N_propCItime <- c(N_propCItime, c(sumtime[i], propCItime[[i]])) } res=c(name, sumtot, paste0(round(proptot*100,0)," (",round(CItot$conf.int[1]*100,0),"-", round(CItot$conf.int[2]*100,0),")"), N_propCItime, round(p$p.value[1],4)) return(res) }
Statistical Test
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
457
Function to calculate overall effect size for death or readmission rates (output has results + model to do model checks). var: character string of the variable to calculate the effect size for.
rate_ES <- function(var){ dat <- na.omit(DATA[,c("number_follow_up", var)]) colnames(dat)<-c("Ni","Ei") dat_es <- escalc( xi= Ei, ni =Ni, data = dat, measure = "PLO", to="if0all")
Statistical Modeling
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
458
Function to get weighted logistic regression with continuous X variable and proportion Y variable, and a ggplot showing the predicted relationship. x: X variable as character. y: Y variable as character. Xpred: vector of X values to get predictions for. labX: X label as character. labY: Y label as character. limY: axis limits for Y axis, vector of 2 values. title: plot title as character. ypos: y relative position of text with OR and p-value.
wtlogis_plot <- function(NB, x, y, Xpred, labX, labY, limY, title, ypos, unit){ dat <- data.frame(count=as.integer(DATA[,y]), Ntot=DATA[,NB], t=DATA[,x]) %>% mutate(prop = count/Ntot, perc = prop*100, wt = log(Ntot), t_div = t/10) dat <- na.omit(dat)
Visualization
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
459
model with time divided by 10 to get OR for 10-year increment
timediv_mod <- glm(prop ~ t_div, family = "binomial", weights = wt, data = dat) ORdiv <- tidy(timediv_mod, conf.int = TRUE) %>% mutate(OR_CI = paste0(round(exp(estimate),2), " (", round(exp(conf.low),2),"-", round(exp(conf.high),2), ")"))
Statistical Modeling
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
460
Function to get weighted linear regression with continuous X variable and continuous Y variable, and a ggplot showing the predicted relationship. x: X variable as character. y: Y variable as character. Xpred: vector of X values to get predictions for. labX: X label as character. labY: Y label as character. limY: axis limits for Y axis, vector of 2 values. title: plot title as character. ypos: y relative position of text with OR and p-value.
wtlin_plot <- function(NB, x, y, Xpred, labX, labY, limY, title, ypos, unit){ dat <- data.frame(y=as.integer(DATA[,y]), Ntot=DATA[,NB], t=DATA[,x]) %>% mutate(wt = log(Ntot), t_div = t/10) dat <- na.omit(dat)
Statistical Modeling
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
461
Function to get OR and CI from the model with 10-year increment. obj: model with x variable divided by 10. p: p-value from the LRT.
OR_div <- function(obj, p){ OR_CI <- paste0(round(exp(obj$beta[2]),2), " (", round(exp(obj$ci.lb[2]),2),"-", round(exp(obj$ci.ub[2]),2), ")") pval_clean <- ifelse(p[["pval"]] < 0.001, "p<0.001", paste0("p=",round(p[["pval"]],3))) OR_p <- c(OR_CI, p, pval_clean) return(OR_p) }
Statistical Modeling
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
462
Function to get meta-regression model with continuous X variable and proportion Y variable, and a ggplot showing the predicted relationship. x: X variable as character. y: Y variable as character. Xpred: vector of X values to get predictions for. labX: X label as character. labY: Y label as character. limY: axis limits for Y axis, vector of 2 values. title: plot title as character. ypos: y relative position of text with OR and p-value.
MA_prop_plot <- function(NB, x, y, Xpred, labX, labY, limY, title, ypos){ prop_dat <- na.omit(DATA[,c(NB, y, x)]) colnames(prop_dat)<-c("Ni","Ei","X") prop_dat$prop <- (prop_dat$Ei/prop_dat$Ni) * 100 prop_dat$X_div <- prop_dat$X/10 es_prop_dat <- escalc( xi= Ei, ni =Ni, data = prop_dat, measure = "PLO", to="if0all")
Visualization
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
463
Function to get OR for subgroup variables with 2 categories. x: character string for the x variable. y: character string for the y variable. group: character string for the group variable. newref: character string for the level of the group variable that is not the reference level.
OR_group_fun <- function(x, y, group, newref){ OR_data <- data.frame(Group = rep(NA,2), N = rep(NA,2), OR = rep(NA,2), lowCI = rep(NA,2), upCI = rep(NA,2), p = rep(NA,2), I2 = rep(NA,2)) group_dat <- na.omit(DATA[,c("number_follow_up", y, x, group)]) colnames(group_dat)<-c("Ni","Ei","X","group") group_dat$X_div <- group_dat$X/10 OR_data$Group <- levels(group_dat$group) OR_data$N <- as.integer(table(group_dat$group)) es_group_dat <- escalc( xi= Ei, ni =Ni, data = group_dat, measure = "PLO", to="if0all") inter_mod1 <- rma(yi=yi,mods = ~ X_div * group, vi=vi, data=es_group_dat, method = "REML") inter_mod2 <- rma(yi=yi,mods = ~ X_div * I(relevel(group, newref)) , vi=vi, data=es_group_dat, method = "REML")
Data Variable
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
464
Function to get meta-regression model with continuous X1 and X2 variables, 2 group variables, and proportion Y variable, and a ggplot showing the predicted relationship with x1 for a median value of x2. x1: X1 variable as character. x2: X2 variable as character. group1: group1 variable (2-level factor) as character. group2: group2 variable (2-level factor) as character. y: Y variable as character. x1pred: vector of values for which to predict x1. x2val: median value of X2 for which to get predictions. labX: X label as character. labY: Y label as character. limY: axis limits for Y axis, vector of 2 values. title: plot title as character. ypos: y relative position for text of OR and p-value.
MA_prop_plot_adj <- function(NB, x1, x2, group1, group2, y, x1pred, x2val, labX, labY, limY, title, ypos){ prop_dat <- na.omit(DATA[,c(NB, y, x1, x2, group1, group2)]) colnames(prop_dat)<-c("Ni","Ei","X1", "X2", "group1", "group2") prop_dat$prop <- (prop_dat$Ei/prop_dat$Ni) * 100 prop_dat$X1_div <- prop_dat$X1/10 es_prop_dat <- escalc( xi= Ei, ni =Ni, data = prop_dat, measure = "PLO", to="if0all")
Statistical Modeling
https://osf.io/cxv5k/
R_functions_Kimmoun_et_al_final.R
465
rayleigh_test: Rayleigh test for the uniformity of directional data, as described by Mardia et al. (1979: chapter 15) and Mardia & Jupp (1999). Note that there would probably be better alternatives on CRAN.
rayleigh_test <- function(X, correction = TRUE, check = TRUE, convert = TRUE) { if(check) { if(!isTRUE(all.equal(diag(tcrossprod(X)), 1))) { if(convert) { X <- X / sqrt(diag(tcrossprod(X))) warning("X was converted into directional data") } else { stop("Directional data (in coordinates) are assumed as X") } } } p <- ncol(X) n <- nrow(X) Mean <- colMeans(X) Norm_Mean_2 <- drop(crossprod(Mean)) S <- p * n * Norm_Mean_2 if(correction) { S <- S * (1 - 1 / 2 / n) + S ^ 2 / (2 * n * (p + 2)) } P_value <- pchisq(S, p, lower.tail = FALSE) list(Norm_Mean = sqrt(Norm_Mean_2), n = n, p = p, Statistics = S, P_value = P_value) }
Statistical Test
https://osf.io/6ukwg/
utility_functions.R
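A minimal sketch of calling rayleigh_test() on simulated data projected onto the unit sphere (illustrative values only):
set.seed(1)
X <- matrix(rnorm(300), ncol = 3)   # 100 random 3-d vectors
X <- X / sqrt(diag(tcrossprod(X)))  # normalize rows to unit length
rayleigh_test(X)$P_value            # directions are uniform, so a large p-value is expected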
466
Compute ICC: run null/unconditional model and use intercept and residual variances to solve for ICC (i.e., Intercept Variance / (Intercept Variance + Residual Variance))
Model.Null <- lmer(VerbPhysAggSum ~ 1 + (1 | ID), data = LineBisectionStudy) summary(Model.Null) ICC <- (18.69/(18.69+7.76)) print(ICC)
Statistical Modeling
https://osf.io/vprwb/
MLM--lmerTest--JPNpsyUpdated.R
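The hard-coded variances above can instead be pulled from the fitted object; a sketch using VarCorr() on the same Model.Null:
vc <- as.data.frame(VarCorr(Model.Null))
ICC <- vc$vcov[vc$grp == "ID"] / sum(vc$vcov)  # intercept variance / (intercept + residual variance)
print(ICC)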
467
Function to extract legend (from: https://gist.github.com/crsh/be88be19233f1df4542aca900501f0fb#file-gglegend-r-L7); otherwise plots without tracks would have no legend
gglegend <- function(x){ tmp <- ggplot_gtable(ggplot_build(x)) leg <- which(sapply(tmp$grobs, function(y) y$name) == "guide-box") tmp$grobs[[leg]] } legend = gglegend(gi)
Visualization
https://osf.io/amd3r/
D_Wind_support_and_track_animation.R
468
define function doing the bootstrap:
boot.fun<-function(x, xcall., data., rv.name., m., discard.non.conv., save.path., extract.all.){ xdone=F while(!xdone){ done2=F while(!done2){ data.[, rv.name.]=simulate(object=m.)[, 1] if(xcall[["xfam"]][["family"]]!="beta" | (xcall[["xfam"]][["family"]]=="beta" & min(data.[, rv.name.])>0 & max(data.[, rv.name.])<1)){ done2=T } } i.res=try(update(m., data=data.), silent=T)
Data Variable
https://osf.io/vjeb3/
boot_glmm.r
469
prepare model frame for conditional model for prediction:
model=attr(terms(as.formula(xcall[["cond.form"]])), "term.labels") model=model[!grepl(x=model, pattern="|", fixed=T)] if(length(model)==0){model="1"} cond.m.mat=model.matrix(object=as.formula(paste(c("~", paste(model, collapse="+")), collapse="")), data=new.data) if(set.circ.var.to.zero){ cond.m.mat[,paste(c("sin(", circ.var.name, ")"), collapse="")]=0 cond.m.mat[,paste(c("cos(", circ.var.name, ")"), collapse="")]=0 }
Statistical Modeling
https://osf.io/vjeb3/
boot_glmm.r
470
Function that draws any bell curve in the form and shape of the ones used in the paper
docurve<-function( sd=2, dist=1, xcent=10, ycent=0.6, paroff=0.15, offoff=0.1, percoff=0.06, percoff2=0.03, thresh.vis=0.01, bellims=15, botlims=8, pardown=0.01, xoff=0, par.col="#0097BD", off.col="#FF0066", off.in.col="#FF0066", off.out.col="#FF6600", fading="33", col.bot="#555555", lwd.par=1.2, lwd.bot=1.2){ off.in.col2<-paste(off.in.col,fading,sep="") off.out.col2<-paste(off.out.col,fading,sep="") p1<-xcent-dist/2 p2<-xcent+dist/2 c1<-gen.curve(sd,average=0,bellims=bellims) polygon(replast(c1[[1]][c1[[1]]<(-dist/2)])+xcent,c(c1[[2]][c1[[1]]<(-dist/2)],0)+ycent,col=off.out.col2,border=NA) polygon(repfirst(c1[[1]][c1[[1]]>(dist/2)])+xcent,c(0,c1[[2]][c1[[1]]>(dist/2)])+ycent,col=off.out.col2,border=NA) polygon(repboth(c1[[1]][c1[[1]]>=(-dist/2)&c1[[1]]<=(dist/2)])+xcent,c(0,c1[[2]][c1[[1]]>=(-dist/2)&c1[[1]]<=(dist/2)],0)+ycent,col=off.in.col2,border=NA) lines(c(xcent-botlims,xcent+botlims),c(ycent,ycent),col=col.bot,lwd=lwd.bot) lines(c(p1,p1),c(ycent-pardown,ycent+paroff),col=par.col,lwd=lwd.par) lines(c(p2,p2),c(ycent-pardown,ycent+paroff),col=par.col,lwd=lwd.par) outprob<-sum(c1[[2]][c1[[1]]>dist/2])/sum(c1[[2]]) inprob<-sum(c1[[2]][c1[[1]]<=dist/2&c1[[1]]>=-dist/2])/sum(c1[[2]]) offtail<-mean(c(c1[[1]][c1[[1]]>dist/2][1], c1[[1]][c1[[1]]>dist/2][!(c1[[2]][c1[[1]]>dist/2]>thresh.vis)][1])) text(xcent,ycent+percoff,paste(round(inprob*100),"%",sep=""),col=off.in.col,cex=0.8) text(c(xcent-offtail,xcent+offtail)+c(-1,1)*xoff,ycent+percoff2,paste(round(outprob*100),"%",sep=""),col=off.out.col,cex=0.8) text(c(p1,p2),ycent+paroff,c(expression(t[p[1]]),expression(t[p[2]])),pos=3,col=par.col,offset=0.15) }
Visualization
https://osf.io/pvyhe/
Figure1.R
471
loglikelihood for each subject using their mean parameter vector
mean_pars_ll <- numeric(ncol(mean_pars)) data <- transform(sampled$data, subject = match(subject, unique(subject))) for (j in 1:nsubj) { mean_pars_ll[j] <- sampled$ll_func(mean_pars[j, ], data = data[data$subject == j,], sample = FALSE) }
Statistical Modeling
https://osf.io/tbczv/
pmwgDIC.r
472
Create the plots and populate them. The yaxt = "n" argument removes axis labels from the y-axis, while the axis() function adds axis labels in line with APS standards for graph publication. The lines() and points() functions add the connecting lines and symbols to the graphs. We repeat the process three times to create the three graphs corresponding to different values of tau.
plot(k, end.data.cov[ ,1], type = "l", ylab = "Coverage", main = "A: Tau = 0.2", yaxt = "n", ylim = c(0.9, 1),xlab = expression("Number of Studies " (italic(k)))) axis(side = 2, at = c(0.92,0.94,0.96,0.98,1),labels = c(".92",".94",".96",".98","1.0")) points(k, end.data.cov[ ,1], pch = 0) lines(k, end.data.cov[ ,2]) points(k, end.data.cov[ ,2], pch = 3) lines(k, end.data.cov[ ,7]) points(k, end.data.cov[ ,7], pch = 5) lines(k, end.data.cov[ ,14]) points(k, end.data.cov[ ,14], pch = 4) legend("bottomright", c("bWT-DL","bWT-REML", "bWT-HE", "Sub-Q"), pch = c(0,5,4,3), cex = 0.9, ncol = 2) plot(k, end.data.cov[ ,3], type = "l", ylab = "Coverage", main = "B: Tau = 0.4", yaxt = "n", ylim = c(0.9, 1),xlab = expression("Number of Studies " (italic(k)))) axis(side = 2, at = c(0.92,0.94,0.96,0.98,1),labels = c(".92",".94",".96",".98","1.0")) points(k, end.data.cov[ ,3], pch = 0) lines(k, end.data.cov[ ,4]) points(k, end.data.cov[ ,4], pch = 3) lines(k, end.data.cov[ ,9]) points(k, end.data.cov[ ,9], pch = 5) lines(k, end.data.cov[ ,17]) points(k, end.data.cov[ ,17], pch = 4) legend("bottomright", c("bWT-DL","bWT-REML", "bWT-HE", "Sub-Q"), pch = c(0,5,4,3), cex = 0.9, ncol = 2) plot(k, end.data.cov[ ,5], type = "l", ylab = "Coverage", main = "C: Tau = 0.6", yaxt = "n", ylim = c(0.9, 1),xlab = expression("Number of Studies " (italic(k)))) axis(side = 2, at = c(0.92,0.94,0.96,0.98,1),labels = c(".92",".94",".96",".98","1.0")) points(k, end.data.cov[ ,5], pch = 0) lines(k, end.data.cov[ ,6]) points(k, end.data.cov[ ,6], pch = 3) lines(k, end.data.cov[ ,11]) points(k, end.data.cov[ ,11], pch = 5) lines(k, end.data.cov[ ,20]) points(k, end.data.cov[ ,20], pch = 4) legend("topright", c("bWT-DL","bWT-REML", "bWT-HE", "Sub-Q"), pch = c(0,5,4,3), cex = 0.9, ncol = 2)
Visualization
https://osf.io/gwn4y/
Line_plots.R
473
Define the initial column names for the x/y coordinates you want to use
xy_columns = list( x = "GazePointX (ADCSpx)", y = "GazePointY (ADCSpx)" ),
Data Variable
https://osf.io/mp9td/
interface.R
474
compute cutoff for guessing by taking the 99% quantile from the binomial distribution (given by guessing probability and number of tests)
mutate(guessing_probability = 1/19, cut_off = qbinom(p = .99, size = number_tests, prob = guessing_probability)/number_tests, guessing_check = ifelse(mean_acc > cut_off, TRUE, FALSE)) %>% group_by(participant_id, condition) %>% summarize(guessing_check = as.logical(min(guessing_check))) %>% ungroup()
Statistical Modeling
https://osf.io/dpkyb/
data-processing.R
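A standalone worked example of the same cutoff logic (hypothetical numbers: 20 tests, chance probability 1/19):
number_tests <- 20
guessing_probability <- 1 / 19
cut_off <- qbinom(p = .99, size = number_tests, prob = guessing_probability) / number_tests
cut_off  # the accuracy a pure guesser would stay at or below 99% of the time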
475
split the data into different data frames depending on the intended analysis; create a dataframe with data on the Hebb effect for the main analysis of the learning data
data_hebb_task = data_filtered %>% filter(phase == "WM") %>% select(participant_id, condition, phase, block, trial_number, hebb_trial, presentation_order:duration) %>% ungroup()
Data Variable
https://osf.io/dpkyb/
data-processing.R
476
check cluster agreement using hierarchical clustering on raw items instead of factors
HCheck.HClusterD <- dist(EFA.mainData[, c(2:56)], method = "euclidean") HCheck.HClusterFit <- hclust(HCheck.HClusterD, method="ward.D2") HCheck.HCluster <- cutree(HCheck.HClusterFit, k = 3) HCheckCluster <- data.frame(Factor = HCluster, Item = HCheck.HCluster) HCheckCluster <- cbind(HCheckCluster, ifelse(HCheckCluster[1] == HCheckCluster[2], 1, 0)) colnames(HCheckCluster)[3] <- 'Agreement' HCheckClusterAgreement <- HCheckCluster[, c(2:3)] %>% group_by(Item) %>% summarise(total = n(), n_agree = sum(Agreement)) %>% rename(Cluster = Item)
Statistical Modeling
https://osf.io/2j47e/
Cluster analysis refined factor scores.R
477
create dataframe with factor scores and cluster membership
KClusterScores <- cbind(EFA.mainData[, c(111:116)], KClusterFit$cluster) colnames(KClusterScores)[7] <- "KCluster"
Data Variable
https://osf.io/2j47e/
Cluster analysis refined factor scores.R
478
testing homogeneity of variance in factors
KClusterScores %>% pivot_longer(c(Sensory, CognitiveDemand, ThreatToSelf, CrossSettings, Safety, States), names_to = c("Factor")) %>% group_by(Factor) %>% levene_test(value ~ as.factor(KCluster))
Statistical Test
https://osf.io/2j47e/
Cluster analysis refined factor scores.R
479
univariate Welch's ANOVA test with Bonferroni correction
table1 <- KClusterScores %>% pivot_longer(c(Sensory, CognitiveDemand, ThreatToSelf, CrossSettings, Safety, States), names_to = c("Factor")) %>% mutate(Factor = factor(Factor, levels = c("Sensory", "CognitiveDemand", "ThreatToSelf", "CrossSettings", "Safety", "States"))) %>% group_by(Factor) %>% welch_anova_test(value ~ as.factor(KCluster)) %>% adjust_pvalue(method = "bonferroni")
Statistical Test
https://osf.io/2j47e/
Cluster analysis refined factor scores.R
480
manually calculating omega squared and adjusted confidence intervals for each test
table1$omegasq <- apply(table1, 1, function(x) omega.F(as.numeric(x[5]), as.numeric(x[6]), as.numeric(x[4]), as.numeric(x[3]), 0.05/6)$omega) table1$omegalow <- apply(table1, 1, function(x) omega.F(as.numeric(x[5]), as.numeric(x[6]), as.numeric(x[4]), as.numeric(x[3]), 0.05/6)$omegalow) table1$omegahigh <- apply(table1, 1, function(x) omega.F(as.numeric(x[5]), as.numeric(x[6]), as.numeric(x[4]), as.numeric(x[3]), 0.05/6)$omegahigh)
Statistical Test
https://osf.io/2j47e/
Cluster analysis refined factor scores.R
481
Generalized linear mixed-effects model (accuracy)
LDTnonword_ACC_LME = glmer(accuracy ~ DISHARc + (1+DISHARc|item) + (1|subject), data = byTrial, family=binomial, control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=2e5))) summary(LDTnonword_ACC_LME)
Statistical Modeling
https://osf.io/gztxa/
Vowel_Harmony_LDT_Exp3.R
482
Estimate d from t and df
dest <- function(t, df) { r2 <- (t^2) / (t^2 + df) d.est <- (2*(r2^.5)) / (1-r2) return(d.est) }
Data Variable
https://osf.io/he8mu/
Custom_Functions.R
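A quick usage sketch for dest() with illustrative inputs:
d <- dest(t = 2, df = 98)  # r2 = 4/102, then d = 2*sqrt(r2)/(1 - r2)
d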
483
Stimuli categorized according to a multinomial distribution
y[i,] ~ dmulti(r[i,], t[i]) predy[i,1:ncat] ~ dmulti(r[i,], t[i]) }
Statistical Modeling
https://osf.io/hrf5t/
Prototype.R
484
Denominator is just the sum of the numerator values
denominator[i] = sum(numerator[i,]) }
Data Variable
https://osf.io/hrf5t/
Prototype.R
485
annotate lines so that each has a unique id for ggplot overplotting (else two lines from the same draw but different replicates can get confused with each other)
func_samples_surface <- func_samples_surface %>% mutate(line_id = as.numeric(rownames(func_samples_surface))) func_samples_aer <- func_samples_aer %>% mutate(line_id = as.numeric(rownames(func_samples_aer)))
Visualization
https://osf.io/fb5tw/
figure_main.R
486
cross product decay_rates with x (time) values and calculate y (titer) values
cat('setting up x values...\n') to_plot_surface <- func_samples_surface %>% crossing(surface_plot_times) to_plot_aer <- func_samples_aer %>% crossing(aer_plot_times) to_plot_surface <- to_plot_surface %>% mutate(predicted_titer = 10^(intercept - decay_rate * time)) to_plot_aer <- to_plot_aer %>% mutate(predicted_titer = convert_mL_media_to_L_air * 10^(intercept - decay_rate * time)) max_nonzero_time <- to_plot_surface %>% filter(log10(predicted_titer) > lowest_log_titer) %>% select(time) %>% max() surface_xlim <- c(0, max_nonzero_time) aer_xlim <- c(0, aer_max_x) print(aer_xlim) aer_jitwid <- 3/100 fit_panel_surface <- to_plot_surface %>% ggplot(aes( x = time, y = predicted_titer, color = virus, group = line_id)) + geom_line(alpha = line_alpha, size = line_size) + scale_colour_manual(values = unlist(virus_colors)) + geom_point(aes(x = time, y = 10^(log10_titer), group = trial_unique_id), data = surface_dat, color = pointborder, fill = pointfill, alpha = pointalpha, size = pointsize, stroke = pointstroke, position = position_jitter( width = jitwid, height = jith, seed = 5)) + geom_hline( data = experiment_dat_virus_surface, aes(yintercept = detection_limit), linetype = detection_linestyle, size = detection_linesize) + scale_y_continuous(trans = ytrans, breaks = ybreaks, labels = yformat) + coord_cartesian(ylim = surface_ylim, xlim = surface_xlim) + facet_grid(vars(virus), vars(material), drop = TRUE)
Data Variable
https://osf.io/fb5tw/
figure_main.R
487
group adjustment for sigma with prior (0, 0.1) for the beta
ACT.BF.sigma.prior01 <- stan(data=ACT.data,file="./BayesFactor/models/ACT/ACT.BF.sigma.prior01.stan", iter=40000, warmup =1000, chains=3,control = list(adapt_delta = 0.9)) ACT.BF.sigma.prior01.bridge <- bridge_sampler(ACT.BF.sigma.prior01) saveRDS(ACT.BF.sigma.prior01.bridge, "./BayesFactor/marginal_lik/ACT/ACT.BF.sigma.prior01.rds") DA.BF.sigma.prior01 <- stan(data=DA.standata,file="./BayesFactor/models/DA/DA.BF.sigma01.stan", iter=40000, warmup =1000, chains=3) DA.BF.sigma.prior01.bridge <- bridge_sampler(DA.BF.sigma.prior01) saveRDS(DA.BF.sigma.prior01.bridge, "./BayesFactor/marginal_lik/DA/DA.BF.sigma.prior01.rds")
Statistical Modeling
https://osf.io/kdjqz/
BF_LissonEtAl2020.R
488
increase in rsq when using elastic net with items instead of linear regression with sum scores (without covariates)
tab_delta_rsq <- merge(tab[learner.id == "elastic net" & predictors == "items" & control == "excluded", .(elasticnet_items = rsq.test.mean), by = c("target", "design")], tab[learner.id == "linear regr" & predictors == "sum scores" & control == "excluded", .(linearregr_sumscores = rsq.test.mean), by = c("target", "design")]) tab_delta_rsq[, .(mean_delta_rsq = mean(elasticnet_items - linearregr_sumscores)), by = "design"]
Statistical Modeling
https://osf.io/t7a28/
collect_results.R
489
Calculate Cohen's d_z
return_Cohen_d_z <- function(variable_1, variable_2){ mean_differences <- mean(variable_1) - mean(variable_2) sd_var1 <- sd(variable_1) sd_var2 <- sd(variable_2) cor_var1_var2 <- cor(variable_1, variable_2) Cohen_d_z <- abs(mean_differences) / sqrt( (sd_var1 ^ 2) + (sd_var2 ^ 2) - (2 * sd_var1 * sd_var2 * cor_var1_var2) ) return(Cohen_d_z) }
Statistical Test
https://osf.io/5te7n/
return_effect_sizes.R
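A toy paired-data sketch for return_Cohen_d_z() (hypothetical values):
pre  <- c(5.1, 4.8, 6.0, 5.5, 4.9)
post <- c(5.8, 5.2, 6.4, 6.1, 5.0)
return_Cohen_d_z(post, pre)  # |mean difference| divided by the SD of the paired differences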
490
visualizing the data: add + scale_x_discrete(guide = guide_axis(angle = 45)) to the plot to change the angle of the labels on the X axis
ggplot(mydata_p6_la, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_ra, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_lp, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45)) ggplot(mydata_p6_rp, aes(x = Condition, y = Voltage)) + geom_boxplot() + facet_grid(.~ Specificity,)+ scale_x_discrete(guide = guide_axis(angle = 45))
Visualization
https://osf.io/p7zwr/
P600.R
491
fill output matrix with the calculated social measure
socialmeasures[,3] <- measure1 socialmeasures[,4] <- measure2 socialmeasures[,5] <- measure3 socialmeasures[,6] <- measure4 socialmeasures[,7] <- measure5 socialmeasures[,8] <- measure6 socialmeasures[,10] <- measure8 socialmeasures[,11] <- measure9 socialmeasures[,12] <- measure10
Data Variable
https://osf.io/wc3nq/
2) soc_measures_code.R
492
7 calculate the AVERAGE SHORTEST PATH of each node of the matrix
measure7 <- mean_distance(graphN, directed = TRUE, unconnected = TRUE) print(measure7)
Statistical Modeling
https://osf.io/wc3nq/
2) soc_measures_code.R
493
Add Age Bins. This acts to help preserve privacy and allows easy plotting: e.g., ggplot(Data, aes(x = Agebins)) + geom_bar()
Agebin = c('< 20', '20-29', '30-39','40-49','50-59','60-69','70-79','80+') Data %<>% mutate(Agebins = case_when( Age < 20 ~ Agebin[1], Age >= 20 & Age < 30 ~ Agebin[2], Age >= 30 & Age < 40 ~ Agebin[3], Age >= 40 & Age < 50 ~ Agebin[4], Age >= 50 & Age < 60 ~ Agebin[5], Age >= 60 & Age < 70 ~ Agebin[6], Age >= 70 & Age < 80 ~ Agebin[7], Age >= 80 ~ Agebin[8] ) ) %>% apply_labels(Agebins = c('< 20' = 1, '20-29' = 2, '30-39' = 3, '40-49' = 4,'50-59' = 5,'60-69' = 6, '70-79' = 7,'80+' = 8))
Visualization
https://osf.io/sw7rq/
Functions.R
494
Chi-squared (gender): create dataframe of gender and cluster membership, then exclude participants with 'Other' as response to analyse with chi-squared test
data.OtherExcluded <- demographics %>% select(Gender, KCluster) %>% filter(Gender != 'Other') data.OtherExcluded$Gender <- droplevels(data.OtherExcluded$Gender)
Statistical Test
https://osf.io/2j47e/
Demographics.R
495
post hoc pairwise chi-squared with Bonferroni correction
pairwiseNominalIndependence(ASD.contable, compare = "row", fisher = FALSE, gtest = FALSE, chisq = TRUE, method = "bonferroni", digits = 3)
Statistical Test
https://osf.io/2j47e/
Demographics.R
496
compile parameter estimates and fit of population ODE/SDE models
modt <- readRDS("PSM_transits_ODE.RDS") mods <- readRDS("PSM_population_ODE.RDS") i <- length(mods) pars <- c(round(modt[[4]]$THETA,1), "OMEGA_stress"=1.5, "OMEGA_ke"=.05, "OMEGA_kt"=.15, "OMEGA_init"=.15) pars[c("init","sigma")] <- c(.001,.001) parA <- list(LB=pars*.2, Init=pars, UB=pars*2.5) #bounds + inits npars <- Vectorize(function(x) sum(parA$Init != round(mods[[x]]$THETA,3)))(1:i) res <- Vectorize(function(x) round(mods[[x]]$THETA,3))(1:i) #parameter estimates for each fitted model res <- rbind(res, "Mtt"= Vectorize(function(x) round(4/mods[[x]]$THETA["kt"],3))(1:i)) #add mean transit time for each model res <- rbind(res, "LL"= -Vectorize(function(x) mods[[x]]$NegLogL)(1:i)) res <- rbind(res, "AIC"= 2*npars +2*Vectorize(function(x) mods[[x]]$NegLogL)(1:i)) res <- rbind(res, "R2"= Vectorize(function(x) 1 - mods[[x]]$THETA["S"]/var(PKdata$DV))(1:i)) colnames(res) <- letters[1:i] print(res)
Statistical Modeling
https://osf.io/ecjy6/
CortStressResponse.R
497
define a fixed grid of cutoff (threshold) values from [1, 0] with length `resolution`
resolution <- 500 cutoff_out <- seq(1, 0, length.out = resolution)
Data Variable
https://osf.io/w7pjy/
bootstrapConfusionMatrix.R
498
Compute F1 skill score for a given cutoff threshold. @param sim vector of numeric values between [0, 1] (e.g., proportion of unstable grid points). @param obs vector of logicals (TRUE/FALSE) stating whether the layer was observed (of concern) or not. @param cutoff in percentage within (0, 1]. @return numeric value of skill score. @export
calculateF1Score <- function(sim, obs, cutoff) { tp <- length(sim[sim >= cutoff & obs]) fn <- length(sim[sim < cutoff & obs]) fp <- length(sim[sim >= cutoff & !obs]) f1 <- 2*tp / (2*tp + fp + fn) return(f1) }
Data Variable
https://osf.io/w7pjy/
bootstrapConfusionMatrix.R
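A hedged sketch evaluating calculateF1Score() over the cutoff grid cutoff_out defined in the previous entry, on simulated sim/obs:
set.seed(42)
sim <- runif(200)            # e.g., proportion of unstable grid points
obs <- runif(200) < sim      # layers more likely to be of concern when sim is high
f1_curve <- sapply(cutoff_out, function(ct) calculateF1Score(sim, obs, ct))
cutoff_out[which.max(f1_curve)]  # cutoff maximizing F1 on these toy data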
499
Underscore split: split up the "variable" column in order to get the different factors (verbtype, input, moment)
participantsdata.long <- cbind(participantsdata.long, colsplit(participantsdata.long$variable, "_", names = c("verbtype", "input", "testmoment")))
Data Variable
https://osf.io/938ye/
Descriptive_statistics.R
500
LDA: training the model
model_lda = train(class ~ ., data=trainSet, method='lda', trControl = trainControl(method = "cv")) fitted <- predict(model_lda)
Statistical Modeling
https://osf.io/xuz8d/
ThesisMLRCode.R