ID: int64 (values 1 to 1.07k)
Comment: string (lengths 8 to 1.13k)
Code: string (lengths 10 to 4.28k)
Label: string (4 classes)
Source: string (length 21)
File: string (lengths 4 to 82)
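A minimal sketch of how a table with these six columns could be inspected in R; the file name is hypothetical and stands in for however the records below are exported:
# Hypothetical export of the records below; adjust the file name as needed
df <- read.csv("code_comment_pairs.csv", stringsAsFactors = FALSE)
str(df[, c("ID", "Comment", "Code", "Label", "Source", "File")])  # the six columns described above
table(df$Label)  # distribution over the four Label classes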
501
function to extract the sequence "Obj_xxx" until a period "." appears
slice_object_string <- function(string) { gsub(".*Obj_(.*)\\..*", "\\1", c(string)) } familiar_objects <- unlist(lapply(string_list, slice_object_string))
Data Variable
https://osf.io/yfegm/
getObjects.r
502
Chi-square tests: relations between diet type and major themes
chisq.test(data$Diet, data$Health) #p = 0.94, chi-squared = 0.01 chisq.test(data$Diet, data$Food) #p = 0.44, chi-squared = 0.59 chisq.test(data$Diet, data$Social) #p = 0.80, chi-squared = 0.06 chisq.test(data$Diet, data$Logistic) #p = 0.17, chi-squared = 1.90 chisq.test(data$Diet, data$Finance) #p = 0.06, chi-squared = 3.61 chisq.test(data$Diet, data$Motivat.) #p = 0.76, chi-squared = 0.10 chisq.test(data$Diet, data$Diet.Cons.) #p = 0.05, chi-squared = 3.97 chisq.test(data$Diet, data$Other) #p = 1, chi-squared = 8.90e-29 chisq.test(data$Diet, data$Positive) #p = 0.16, chi-squared = 1.94
Statistical Test
https://osf.io/q2zrp/
Chi-squaretests.R
503
Hypotheses: fit multivariate regression model
moltenformula <- as.formula("value ~ v - 1 + v:scipopgoertz + v:age + v:sex + v:lingreg_D + v:lingreg_I + v:urbanity_log + v:edu_uni + v:edu_com + v:sciprox_score + v:sciliteracy + v:rel + v:pol + v:inter_science + v:trust_science + v:trust_scientists + v:scimedia_att + v:scimedia_sat") m_inx <- svyglm(moltenformula, design = svydsgn_melt_inx)
Statistical Modeling
https://osf.io/yhmbd/
02_main-analysis.R
504
estimate bifactor model (maximum likelihood)
fit <- cfa(model, dat,estimator = "MLM", std.lv = TRUE)
Statistical Modeling
https://osf.io/qk3bf/
iip_estimate_bifactor.R
505
draw random factor scores from a multivariate normal distribution
lat_scores <- mvrnorm(n = n, mu = mu, Sigma = sigma)
Statistical Modeling
https://osf.io/qk3bf/
iip_estimate_bifactor.R
506
function to create data summaries in figures
data_summary <- function(x) { m <- mean(x) ymin <- m-ci(x) ymax <- m+ci(x) return(c(y=m,ymin=ymin,ymax=ymax)) }
Visualization
https://osf.io/mj5nh/
educationestimatecentralitydefaults.R
507
To get the numbers for the confidence intervals, click on the item in the global environment and expand it; plot 95% credible intervals
x<-plot(me, plot = FALSE)[[1]] + scale_color_grey() + scale_fill_grey() p<-x+ylim(0,1)+ theme(panel.grid.major = element_line(colour="gray"), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_blank(),panel.grid.major.x = element_blank())+xlab("")+ylab("") p
Visualization
https://osf.io/a8htx/
SSK_Cleaned_copy.R
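A hedged alternative to clicking through the global environment, assuming `me` is a brms marginal_effects()/conditional_effects() object as the plotting code above suggests: the interval numbers can be pulled from the effect's underlying data frame.
# Assumption: `me` comes from brms::conditional_effects() (or the older marginal_effects())
eff <- me[[1]]                                # data frame behind the first effect
eff[, c("estimate__", "lower__", "upper__")]  # point estimates with 95% interval bounds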
508
1 = Never, 2 = I tried it but don't do it systematically, 3 = I do it when it feels convenient, 4 = I do it for most research projects/studies, 5 = I do it for every research project/study. Make data frame 'Wish' with intentions to engage in open science practices
wish = data[, 17:25] wish_complete = wish[,-9] # remove missing data wish = apply(wish, 2, as.numeric) wish = as.data.frame(unlist(wish)) colnames(wish) = c("preregister", "sharedatas", "sharecode", "openaccess", "preprint", "openpeer", "opensw", "replicate", "other") wish = t(wish) rowMeans(wish, na.rm = T) # mean apply(wish, 1, sd, na.rm = T) # standard deviation
Data Variable
https://osf.io/zv3uc/
analysis.R
509
generates 95% confidence intervals for each beta coefficient
m.stakes_decision.CI <- round(confint(m.stakes_decision, parm = "beta_"), 3)
Statistical Test
https://osf.io/uygpq/
Cross-paradigm.R
510
2.3 Loss tangents: we draw a diagram of loss tangents with a logarithmic scale on the x-axis
plot(graphData$Freq[graphData$site == "wreck"], graphData$tan[graphData$site == "wreck"], log = "x", axes = FALSE, xlab = "", ylab = "", type = "b", pch = PCH, col = col_wreck, xlim = c(0.01, 100), ylim = c(0, 15)) par(new = TRUE, ps = PS, mar = MAR) plot(graphData$Freq[graphData$site == "beach"], graphData$tan[graphData$site == "beach"], type = "b", log = "x", xlab = expression("Frequency [rad s"^-1 *"]"), ylab = expression(paste("tan ", delta)), pch = PCH, col = col_beach, axes = FALSE, xlim = c(0.01, 100), ylim = c(0, 15)) axis(1, at = c(0, 0.1, 1, 10, 100, 1000), labels = c(0, 0.1, 1, 10, 100, 100)) axis(2, at = c(-1 , 0, 3, 6, 9, 12, 15)) text(x = 0.01, y = 15, label = expression(bold("c"))) abline(1, 0)
Visualization
https://osf.io/9jxzs/
07_analysis_rheology.R
511
estimate the mean average difference between observed and corrected values; check for sign reversals
dat_analysis[sign(MA_ES_self_corr) == sign(MA_ES_self_obs), .N] # 46 (no sign rev)
Statistical Modeling
https://osf.io/dqc3y/
REL_corr_obs_descr.R
512
Step 3: Fit varying-intercept ESEM model
esem_anti_fit_alt1F <- cfa(esem_anti_model_alt1F, esem_anti_data_alt, group = "country_iso", estimator = "MLM", group.equal = "loadings")
Statistical Modeling
https://osf.io/w4gey/
02_measures.R
513
performing the chi-squared comparisons across all possible pairs
pairwise.table(chi_squared_paired_comparison_1, rownames(hypotheses_per_research_area), p.adjust.method="none") pairwise.table(chi_squared_paired_comparison_2, rownames(hypotheses_per_research_area), p.adjust.method="none")
Statistical Test
https://osf.io/4ya6x/
R Code First Year Paper Cas Goos Analysis.R
514
chi-squared test of criteria met by association type
f_statistic <- chisq.test(data_hypotheses_long$association_type, data_hypotheses_long$reporting_criteria_met) f_statistic
Statistical Test
https://osf.io/4ya6x/
R Code First Year Paper Cas Goos Analysis.R
515
Hashtags: word cloud of the most common hashtags
frequenthashes <- hashtable[hashtable>floor(quantile(hashtable,.999))] hashvec <- unlist(lapply(frequenthashes,function(x) rep(names(x),x))) stretched <- log10(frequenthashes-min(frequenthashes)+1)/max(log10(frequenthashes-min(frequenthashes)+1)) cols <- rgb(1- stretched,0,0) png("hashcloud.png",width = 12,height=12,units = "cm",res=300) wordcloud(names(frequenthashes),frequenthashes,random.order = F,colors=cols) dev.off()
Visualization
https://osf.io/u3wby/
twitter_analyze.R
516
Fit the model using unweighted least squares (ULS)
m1.fit <- sem( m1, sample.cov = S, sample.nobs = 200, estimator = "ULS")
Statistical Modeling
https://osf.io/vy763/
Acloserlookatfixed-effectsregressioninsem_supplementarymaterials.R
517
Remove items that do not occur in the country
items <- items[items$item %in% colnames(x_country), ]
Data Variable
https://osf.io/8fzns/
3H_IRT_helper.R
518
multilevel model (repeated measures) correlation using package rmcorr:
td_corr <- rmcorr(subject, trust, distrust, td) td_corr
Statistical Modeling
https://osf.io/kwp6n/
Everyday_Trust_Rcode.R
519
Robust growth model: MLR estimator
rctBoot <- growth(rctModel, data=rctWide, se = "bootstrap") summary(rctBoot) rctMLR <- growth(rctModel, data=rctWide, estimator = "MLR") summary(rctMLR)
Statistical Modeling
https://osf.io/fbj3z/
R Script for Field & Wilcox (2017).R
520
create a scatterplot of sleep quality and insomnia scores (FIRST). Add appropriate labels
plot(Sleep_and_COVID19$PSQI_global, Sleep_and_COVID19$FIRST, main = "Main title", xlab = "X axis title", ylab = "Y axis title", frame = FALSE)
Visualization
https://osf.io/94jyp/
Ex2_BasicAnalysis_Answers.R
521
4) T-test: examine whether there is a significant difference in sleep quality in those who may have symptoms of or been diagnosed with COVID
t.test(PSQI_global ~ SymptomsOrPositive, data = Sleep_and_COVID19)
Statistical Test
https://osf.io/94jyp/
Ex2_BasicAnalysis_Answers.R
522
Examine whether there is a significant difference in mean sleep difficulty scores in those who undertake shiftwork
t.test(PSQI_global ~ ShiftWork, data = Sleep_and_COVID19) ?t.test
Statistical Test
https://osf.io/94jyp/
Ex2_BasicAnalysis_Answers.R
523
6) Regression analysis: using multiple regression, examine the influence of age, hours worked, physical health, coffee drunk, alertness, and insomnia proneness on sleep quality
model1 <- lm(PSQI_global ~ Age + WorkHour + Health + CoffeeToday + StanfordSleepinessScale + FIRST, data = Sleep_and_COVID19) summary(model1)
Statistical Modeling
https://osf.io/94jyp/
Ex2_BasicAnalysis_Answers.R
524
Draw samples using heart_rate_traj above as the mean, with a constant standard deviation (10 beats/min)
heart_rate <- rnorm(n = length(heart_rate_traj), mean = heart_rate_traj, sd = 10)
Statistical Modeling
https://osf.io/skp56/
CSEP_DataScienceExPhys_DataGen_Akerman.R
525
Computing p-values based on the F-distribution
p_durable <- pf(q=Femp_durable, df1=Df_aest_durable, df2=Df_residual, lower.tail = F) p_nond <- pf(q=Femp_nond, df1=Df_aest_nond, df2=Df_residual, lower.tail = F)
Statistical Test
https://osf.io/n3zfp/
Aesthetic-Fidelity-Effect-Statistical Analyses.r
526
Logistic regression model: creating a null model and a logistic regression model based on all variables
lr.null <- glm(Treatment ~ 1, data = dat2, family = binomial(link="logit")) lrm <- glm(Treatment ~ Sq + Ssk + Sku + Sp + Sv + Sz + Sa,data = dat2, family = binomial(link="logit")) summary(lrm)
Statistical Modeling
https://osf.io/fvw2k/
Murrayetal_SurfaceR_Code.R
527
Filter and select variables of interest
d <- d_2011 %>% subset(sns == 1) %>% # indicated to use social media subset(!is.na(beh_disclose_soc)) %>% select(id, country, year, gender, age, cons_record, cons_zweck, cons_target, contains("soc"), v143:v156, # beh_setting, int_home, int_work, sns, nth_arg) %>% mutate(cons_mean = (cons_record + cons_zweck + cons_target)/3, cons_mean_bin = ifelse(cons_mean > median(cons_mean, na.rm = T), 1, ifelse(is.na(cons_mean), NA, 0))) %>% mutate(age_group = ifelse(age <= 32, "younger than 32 years", "older than 32 years"))
Data Variable
https://osf.io/m72gb/
analysis_disclosure.r
528
Confirmatory factor analysis (CFA) for privacy concerns (tau-equivalent)
cfa.model <- " priv_con =~ 1*cons_record + 1*cons_zweck + 1*cons_target " fit <- cfa(cfa.model, d_2011) summary(fit, std = T, fit = T) reliability(fit)
Statistical Modeling
https://osf.io/m72gb/
analysis_disclosure.r
529
Estimate models across all specifications; customized model functions to include a random country effect
linear <- function(formula, data) { pb$tick() # set tick for progress bar lmer(paste(formula, "+ (1|country)"), data = data) }
Statistical Modeling
https://osf.io/m72gb/
analysis_disclosure.r
530
Decomposing variance: estimate multilevel model (without predictors)
m_lin <- lmer(stdcoef ~ 1 + (1|x) + (1|controls) + (1|subsets) + (1|x:controls) + (1|x:subsets) + (1|subsets:controls), data = results) summary(m_lin)
Statistical Modeling
https://osf.io/m72gb/
analysis_disclosure.r
531
run polynomial regression as an OLS linear model
f <- paste0(DV, " ~ ", paste("1", IV1, IV2, IV12, IV_IA, IV22, sep=" + "), controlvariables) lm.full <- lm(f, data=df, na.action=na.exclude)
Statistical Modeling
https://osf.io/m6pb2/
helpers.R
532
Matrices of phenotypic change trajectories X and Z, interlineage correlation matrix C, and intertrait cross-product matrix A, as well as the number of lineages n (= 13) and the dimensionality of the vectors p (= 76)
X <- f.mean.diff(data.sc, fishID) Z <- X / sqrt(diag(tcrossprod(X))) C <- tcrossprod(Z) A <- crossprod(Z) n <- nrow(Z) p <- ncol(Z) - 4 # -4 accounting for the loss of d.f. from Procrustes alignment
Data Variable
https://osf.io/6ukwg/
codes_reanalysis.R
533
Histograms comparing dissimilarity between the original and bootstrap replicates for "Moo" and "Pac" (largest and smallest dispersion). This shows that the heteroscedasticity is real (not due to visual distortion)
hist(acos(Z[10, ] %*% Zb[10, , ]), breaks = seq(0, pi / 2, 0.1), col = "#FF000080", main = "Pac (red) versus Moo (blue)", xlab = "Angle between original and bootstrap vectors (radian)") hist(acos(Z[7, ] %*% Zb[7, , ]), add = TRUE, breaks = seq(0, pi / 2, 0.1), col = "#0000FF80")
Visualization
https://osf.io/6ukwg/
codes_reanalysis.R
534
Use kNNdistplot(d.cor.umap, k = 3) to determine the value for eps
d.cor.dbscan <- dbscan(d.cor.umap, minPts = 3, eps = 1.8) data.table.large.cluster <- data.table[,d.cor.dbscan$cluster == 1] large.cluster.cor.metric <- as.dist(acos(cor(data.table.large.cluster))) large.umap <- umap( large.cluster.cor.metric, min_dist = 1, n_neighbors = 3 ) large.dbscan <- dbscan(large.umap, minPts = 3, eps = 1) large.cols <- rainbow(length(unique(large.dbscan$cluster)), start = 2/6) write.csv( d.cor.umap, 'd_acos_cor_fixed.csv', row.names = F ) write.csv( d.cor.dbscan$cluster, 'dbscan_clustering_fixed.csv' ) write.csv( large.umap, 'd_acos_cor_fixed_large_cluster.csv', row.names = F ) write.csv( large.dbscan$cluster, 'dbscan_clustering_of_large_cluster_fixed.csv' )
Visualization
https://osf.io/2qjn5/
dump_csv.R
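A short sketch of the eps-selection step referenced in the comment above, assuming the dbscan package and the same `d.cor.umap` embedding: the sorted 3-nearest-neighbour distances are plotted and eps is read off at the knee of the curve.
library(dbscan)

kNNdistplot(d.cor.umap, k = 3)   # sorted distances to each point's 3rd nearest neighbour
abline(h = 1.8, lty = 2)         # a knee near here would motivate eps = 1.8 in the dbscan() call above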
535
Chi-square test: difference in preference based on outcome, by condition
chisq.test(matrix(c(19,5,5,17), ncol = 2, byrow = T)) oddsratio.wald.outcome <- oddsratio.wald(matrix(c(19,5,5,17), ncol = 2, byrow = T))$measure[2] # Note: I reorganized the matrix so that r is of the same sign as that of Experiment 2 esc_2x2( grp1yes = 19, grp1no = 5, grp2yes = 5, grp2no = 17, es.type = "or" ) # this confirms Wald's odds ratio
Statistical Test
https://osf.io/qrvje/
beliefhelp15-220615.R
536
Two-sample t-test: difference in proportionate looking based on outcome, by condition
twosamplet.outcome <- t.test(pPositiveO ~ Condition, data = bh2zoom, alternative = "two.sided") twosampled.outcome <- cohen.d(pPositiveO ~ Condition, data = bh2zoom)$estimate
Statistical Test
https://osf.io/qrvje/
beliefhelp15-220615.R
537
Assess how well the model captured PWLs. This function calculates several indicators that help to assess how well the model captured captWKLs beyond simple layer structure. It gets called by the overarching evaluation script `evaluate_main.R`. For each WKL the function computes: correlation factors for likelihood/distribution/sensitivity versus the proportion of unstable grid points; the maximum proportion of unstable grid points during the lifetime of the WKL; the temporal lag between the first forecaster concern (i.e., avalanche problem) and the first time that grid points are unstable in that layer (different timing instances are possible: first time (i) at least one grid point is unstable, (ii) the majority of grid points are unstable, (iii) more than 50% of the maximum proportion are unstable); and the temporal lag between the latest forecaster concern (last day of the avalanche problem) and the last time that grid points are unstable in that layer (rules are still needed for layers becoming dormant and waking up again versus becoming inactive). @param VData object; don't include both primary/secondary and tertiary gtype_ranks! @export
assessQualityOfcaptWKL <- function(VData, band, stabilityindex = "p_unstable") { gtype_rank <- unique(VData$vframe$gtype_rank) if (any(c("primary", "secondary") %in% gtype_rank) & "tertiary" %in% gtype_rank) stop("You can only provide either primary and/or secondary, or tertiary gtype_ranks in your VData object to this function!") possible_ranks <- c("tertiary", "secondary", "primary") gtype_rank <- possible_ranks[possible_ranks %in% gtype_rank][1] nrow_max <- length(VData$wkl$wkl_uuid) OUTnames <- c( "wkl_uuid", "wkl_iscrust", "wkldate", "wkltag", "nPDays", "nPDays_anyunstable", "nPDays_median", "nPDays_halfofmax", "nPDays_20", "rho_llhd", "rho_dist", "rho_sens", "p_llhd", "p_dist", "p_sens", "pu_max", "offset_maxes", "offset_mean", "pcapt_max", "lagA_anyunstable", "lagA_median", "lagA_halfofmax", "lagA_20", "lagZ_anyunstable", "lagZ_median", "lagZ_halfofmax", "lagZ_20", "band", "stabilityindex", "gtype_rank" ) OUT <- matrix(nrow = max(1, nrow_max), ncol = length(OUTnames), dimnames = list(seq(max(1, nrow_max)), OUTnames)) for (i in seq_along(VData$wkl$wkl_uuid)) { wuid <- VData$wkl$wkl_uuid[i] wkldate <- as.character(as.Date(VData$wkl$datetag[i])) wkltag <- paste(format(as.Date(VData$wkl$datetag[i]), "%b %d"), substr(gtype_rank, start = 1, stop = 4)) wkl_iscrust <- as.logical(VData$wkl$iscrust[i])
Statistical Modeling
https://osf.io/w7pjy/
assessQualityOfcaptWKL.R
538
Correlation of life satisfaction and depression across all measurement occasions
d.all %>% select(contains("fsat"), contains("depr"), id) %>% gather(key, value, -id) %>% separate(key, c("time", "variable")) %>% spread(variable, value) %>% select(depr, fsat) %>% zero_order_corr(print = T, digits = 3) %>% select(`1`) %>% slice(2)
Statistical Test
https://osf.io/fdp39/
analysis.R
539
check if latent covariances are equal across groups
est_svc <- cfa(mod_s,quop_use, estimator = "MLR", missing = "FIML", group = "year", group.equal = c("loadings","intercepts","residuals", "lv.variances","lv.covariances"), cluster = "class")
Statistical Modeling
https://osf.io/vphyt/
Pandemic_Cohorts_vs_Pre_Pandemic_Cohorts.R
540
get factor scores from model est_svcm_sl
fs <- lavPredict(est_svcm_sl, method = "bartlett")
Statistical Modeling
https://osf.io/vphyt/
Pandemic_Cohorts_vs_Pre_Pandemic_Cohorts.R
541
Plot for the distribution of emotions. Arguments: original ddbb, type of emotion, plot title
create.emotion.plot <- function(case_ddbb, emotion="emotion", plot_title=NULL){
Visualization
https://osf.io/unxj2/
functions_1.R
542
Table and plot for effect sizes. Arguments: original ddbb, plot title
create.effectsize.tableplot <- function(case_ddbb, plot_title = NULL){
Visualization
https://osf.io/unxj2/
functions_1.R
543
Convert average time into an ordinal variable
Tbl2$DurSecAvg1Cat <- cut(Tbl2$DurSecAvg1, breaks = quantile(Tbl2$DurSecAvg1, c(0, 1/3, 2/3, 1)), include.lowest = T) aggregate(Tbl2$DurSecAvg1, list(Tbl2$DurSecAvg1Cat), summary) levels(Tbl2$DurSecAvg1Cat) <- c("Less", "Avg", "More")
Data Variable
https://osf.io/n5j3w/
2021-12-03_AnalysisCode.R
544
Plotting Figure 3
png(filename = "Fig03_Demographics.png", width = 19, height = 10, units = "cm", res = 500, pointsize = 7) par(mfrow=c(2,3))
Visualization
https://osf.io/n5j3w/
2021-12-03_AnalysisCode.R
545
Predict node for checking differences in distributions with a chi-squared test
Tbl2$NodeRate <- predict(Engage, newdata = Tbl2, type = "node") table(Tbl2$NodeRate) (temp <- chisq.test(Tbl2$NodeRate, Tbl2$FeedbackEngage)) temp$observed round(100*prop.table(temp$observed, 1), 1) mosaic(temp$observed, shade = T) Tbl2$NodeRate <- predict(FeedTree, newdata = Tbl2, type = "node") table(Tbl2$NodeRate) (temp <- chisq.test(Tbl2$NodeRate, Tbl2$Useful_FeedbackMiddle)) temp$observed round(100*prop.table(temp$observed, 1), 1) mosaic(temp$observed, shade = T) Tbl2$NodeRate <- predict(ExTree, newdata = Tbl2, type = "node") table(Tbl2$NodeRate) (temp <- chisq.test(Tbl2$NodeRate, Tbl2$Useful_FeedbackMiddle)) temp$observed round(100*prop.table(temp$observed, 1), 1) mosaic(temp$observed, shade = T) head(Tbl2$RankTaskPerform)
Statistical Test
https://osf.io/n5j3w/
2021-12-03_AnalysisCode.R
546
Convert `from_name` to factor with three levels. (Facilitates data visualization).
Campaign_Messages$from_name <- factor(Campaign_Messages$from_name, levels = c ("Stephen Harper", "Justin Trudeau", "Tom Mulcair"))
Data Variable
https://osf.io/3fnjq/
campaign_messages.R
547
change the title text to size 20 and bold
axis.title = element_text(size = 20, face = "bold"), axis.title = element_text(size = 20, face = "bold"),
Visualization
https://osf.io/9e3cu/
visualization_code.R
548
change the axis label text to size 14, bold, and black color
axis.text.x = element_text(size = 14, face = "bold", color = "black"), axis.text.y = element_text(size = 14, face = "bold", color = "black")) + axis.text.x = element_text(size = 14, face = "bold", color = "black"), axis.text.y = element_text(size = 14, face = "bold", color = "black")) + scale_y_continuous(limits = c(0, 17), expand = c(0,0)) +
Visualization
https://osf.io/9e3cu/
visualization_code.R
549
Identify all neighbours within 2km
sp_nb <- spdep::dnearneigh(coords, d1 = 0, d2 = km, row.names = row.names(sp), longlat = TRUE)
Data Variable
https://osf.io/hfjgw/
00-utils.r
550
take the inverse of the distances
W <- lapply(dsts, function(x) 1 / x) W <- lapply(dsts, function(x) 1 / x)
Data Variable
https://osf.io/hfjgw/
00-utils.r
551
recode base price and overage as numeric (without $ sign)
x$b1=as.numeric(sapply(strsplit(as.character(x$b1),"\\$"),function(y) y[2])) x$b2=as.numeric(sapply(strsplit(as.character(x$b2),"\\$"),function(y) y[2])) x$b3=as.numeric(sapply(strsplit(as.character(x$b3),"\\$"),function(y) y[2])) x$c1=as.numeric(sapply(strsplit(as.character(x$c1),"\\$"),function(y) y[2])) x$c2=as.numeric(sapply(strsplit(as.character(x$c2),"\\$"),function(y) y[2])) x$c3=as.numeric(sapply(strsplit(as.character(x$c3),"\\$"),function(y) y[2]))
Data Variable
https://osf.io/wbyj7/
read-data-Exp1.r
552
create a dataset with possible k values and their prior and likelihood probabilities; the likelihood is drawn from 1000 random samples from a beta distribution
df <- tibble(k = hypo_k) %>% mutate(model_vote = 0.0727^k) %>% mutate(occur = map_int(.$model_vote, ~ length(which(polls$value >= .x))), prior = prob_k) %>% mutate(like = map(.$occur,~ rbeta(trials, .x, polls_len - .x)))
Statistical Modeling
https://osf.io/d4hjq/
02_parameter_estimation.R
553
visualise normalised posterior on a graph
p <- df %>% ggplot(aes(k, norm_posterior)) + geom_point() + geom_line() p + labs( title = "Probability of k exponent in the interval <0.89; 0.99>", x = "Value of k exponent", y = "Normalised probability" ) + theme_minimal()
Visualization
https://osf.io/d4hjq/
02_parameter_estimation.R
554
3) CALCULATION OF ESTIMATED MARGINAL MEANS: tests for differences in response to jargon for the plot
at_jarg <- list(Jargon = c("More", "Less"), UnderN = "Easy", RecognN = "Fairly", TrvlAdv_Atten = "Cons", cut = "3|4") grid_jarg <- ref_grid(clmm_usejar, mode = "exc.prob", at = at_jarg) (emm_jarg <- emmeans(grid_jarg, specs = pairwise ~ Jargon, by = "BackgrAvTraining")) plot_jarg <- summary(emm_jarg$emmeans) cont_jarg <- summary(emm_jarg$contrasts)
Statistical Modeling
https://osf.io/aczx5/
220423_Fig04_Use_JargExpl.R
555
Gradient of the FDR with respect to alpha (and its implied power) across ncp for a one-sided z-test. Note that the gradient increases towards $\infty$ from both sides at ncp = 0 and alpha = 0, but is equal to 0 at that point. The figure does not allow this to be depicted properly, and it looks as if the gradient peaked at ncp around .5 and decreased towards 0. Green corresponds to $P(H_0) = .2$, blue to $P(H_0) = .5$, and red to $P(H_0) = .8$.
alpha <- seq( 0, 1, length.out = 500) ncp <- seq(-5, 5, length.out = 500) FDR.2od <- fill_gradFDR.alpha(.2, alpha, ncp, FALSE) FDR.5od <- fill_gradFDR.alpha(.5, alpha, ncp, FALSE) FDR.8od <- fill_gradFDR.alpha(.8, alpha, ncp, FALSE) plot_ly(x = ~alpha, y = ~ncp, z = ~FDR.5od) %>% add_surface(contours = list( y = list( show = TRUE, project = list(y = TRUE), usecolormap = FALSE, color = "blue" ), x = list( show = TRUE, project = list(x = TRUE), usecolormap = FALSE, color = "blue" ) ), opacity = .4) %>% layout(scene = list( xaxis = list( tickvals = c(0, .05, .1, .2, .5, 1) ), yaxis = list( tickvals = pretty(ncp) ), zaxis = list( tickvals = seq(0, 1, .2), title = list(text = "FDR'") ) )) %>% add_surface(x = ~alpha, y = ~ncp, z = ~FDR.8od, opacity = .4, contours = list( y = list( show = TRUE, project = list(y = TRUE), color = "red" ), x = list( show = TRUE, project = list(x = TRUE), color = "red" ) )) %>% add_surface(x = ~alpha, y = ~ncp, z = ~FDR.2od, opacity = .4, contours = list( y = list( show = TRUE, project = list(y = TRUE), color = "green" ), x = list( show = TRUE, project = list(x = TRUE), color = "green" ) ))
Visualization
https://osf.io/kbjw9/
3DFDRPlot.R
556
List that will include the table (each element should be a vector of 3):
Table <- list( c("Network Characteristics",title,""), c("Comparing Global Characteristics",nameOrig, nameRepl) )
Data Variable
https://osf.io/akywf/
splithalf_table_function.R
557
The row with "def_cond" is moved to a new column and duplicated (this is done with reference to "cond_to_group")
df_tmp_2 <- full_join( df_tmp_1 %>% filter({{cond_to_compare}} != def_cond), df_tmp_1 %>% filter({{cond_to_compare}} == def_cond), by = col )
Data Variable
https://osf.io/4fvwe/
return_BF_ttests.R
558
Repeating the analyses with closeness as covariate
cor.test(data_3A$closeness_1,data_3A$closeness_2) cor.test(data_3B$closeness_1,data_3B$closeness_2) cor.test(data_3C$closeness_1,data_3C$closeness_2)
Statistical Modeling
https://osf.io/sb3kw/
Meta_Analysis.R
559
check if the number of processed partners is smaller than the number of listed partners as saved in the counter variable
nrow(Relationship_details) < My_EHC_survey$count_SP
Data Variable
https://osf.io/y5gr9/
Skip_Backwards.R
560
run ANOVAs to check the interaction between specificity and condition (LA)
anova_N4_la_sp = summary(aov(Voltage ~ Condition *Specificity , data = mydata_n4_la)) anova_N4_la_sp
Statistical Test
https://osf.io/p7zwr/
N400.R
561
Visualize grid clusters
mgp.u<-c(2.3,0.6,0.5) if(tifit==T){ tiff(tifname,width=14,height=12.5,units="cm",res=600,compression="lzw") } par(oma=c(0,4,4,4)) layout(matrix(1:9,ncol=3,byrow=T),widths=c(1.28,1,1),heights=c(1,1,1.28)) doFloorC(n=1000,eta=1,char=char,mar1=c(gap,3.5,gap,gap),mgp1=mgp.u,xax=F,yax=T,xl="",col2=col2,seq.s=seq.s,seq.c=seq.c,contour=contour)
Visualization
https://osf.io/pvyhe/
grid_visualize_3by3.R
562
Create the dataset for visualizations; summarize the means (and SD for clusters)
means.cl<-tapply(res$clust,paste(res$n,res$h,res$nu,res$eta),mean,na.rm=T) sd.cl<-tapply(res$clust,paste(res$n,res$h,res$nu,res$eta),sd,na.rm=T) means.dim<-tapply(res$explained3D,paste(res$n,res$h,res$nu,res$eta),mean,na.rm=T) means.avg<-tapply(log(res$avgdist),paste(res$n,res$h,res$nu,res$eta),mean,na.rm=T) vars<-strsplit(names(means.cl)," ")
Data Variable
https://osf.io/pvyhe/
grid_visualize_3by3.R
563
Graph of the distribution of p-values for each effect
pvalue.plot <- ggplot(melt_pvalue,aes(x = value)) + facet_wrap(~variable, ncol = 5) + geom_histogram()
Visualization
https://osf.io/unxj2/
functions_2.R
564
Graph of the distribution of standard estimates for each effect
est.std.plot <- ggplot(melt_est.std,aes(x = value)) + facet_wrap(~variable, ncol = 5) + geom_histogram() list.result <- list("dist_pvalue.plot" = pvalue.plot, "dist_est.std.plot" = est.std.plot) return(list.result)
Visualization
https://osf.io/unxj2/
functions_2.R
565
returns plot of p-value distribution, plot of standard estimates distribution
list.result <- list("dist_pvalue.plot" = pvalue.plot, "dist_est.std.plot" = est.std.plot) return(list.result)
Visualization
https://osf.io/unxj2/
functions_2.R
566
Model estimates plot. Arguments: standard estimates distribution, p-value distributions, use significance level TRUE/FALSE, level of significance, plot title
create.model_estimates.plot <- function (est.std_dist, pvalue_dist, significant = FALSE, sig_level=.05, title = NULL){
Visualization
https://osf.io/unxj2/
functions_2.R
567
Median of each standard estimate and TRUE/FALSE values if mean(p-values) < sig_level
est.std_All <- data.frame(cbind(colMeans(est.std_dist), colMeans(pvalue_dist) <= sig_level))
Statistical Test
https://osf.io/unxj2/
functions_2.R
568
Change the participantID to a shorter & uniform name "Subject"
names(d)[names(d) == "Participant.Public.ID"] <- "Subject"
Data Variable
https://osf.io/cd5r8/
R_Code_Hui_et_al.R
569
For the trimmed set, we remove any RT that is too long (>2500 ms) or too short (<300 ms)
d_trimmed = d %>% filter (acc == 2, Reaction.Time > 300, Reaction.Time < 2500)
Data Variable
https://osf.io/cd5r8/
R_Code_Hui_et_al.R
570
Plotting: create a dataset that contains means of both odd-numbered & even-numbered items' RTs
raw_untrimmed<-inner_join(x= mean.o, y=mean.e, by="Subject") names(raw_untrimmed) <- c("Subject", "Odd", "Even")
Visualization
https://osf.io/cd5r8/
R_Code_Hui_et_al.R
571
AVERAGE BY PARTICIPANT
mean.e = de %>% group_by(Subject) %>% summarise(mean(rt.diff)) mean.o = do %>% group_by(Subject) %>% summarise(mean(rt.diff)) mean.e = de %>% group_by(Subject) %>% summarise(mean(zrt.diff)) mean.o = do %>% group_by(Subject) %>% summarise(mean(zrt.diff)) mean.e = de %>% group_by(Subject) %>% summarise(mean(zrt.diff)) mean.o = do %>% group_by(Subject) %>% summarise(mean(zrt.diff))
Data Variable
https://osf.io/cd5r8/
R_Code_Hui_et_al.R
572
combine them into one data frame
person <- inner_join(person_rand_e, person_rand_o, by = "Subject", suffix = c("_even", "_odd"))
Data Variable
https://osf.io/cd5r8/
R_Code_Hui_et_al.R
573
Wilcoxon rank-sum test on widths of unbroken artefacts:
results <- wilcox.test(An2entire$Width,Gmeentire$Width) Z <- qnorm(results$p.value)
Statistical Test
https://osf.io/tgf3q/
SI_4_Statistical_analyses.R
574
Wilcoxon rank-sum test on lengths of unbroken artefacts:
results <- wilcox.test(An2entire$Length,Gmeentire$Length) Z <- qnorm(results$p.value)
Statistical Test
https://osf.io/tgf3q/
SI_4_Statistical_analyses.R
575
DATA DOWNSAMPLING: add columns with delta distance and delta time for downsampling
trajectory.df <- trajectory.df %>% group_by(id) %>% mutate(time_diff = difftime(dt, lag(dt, n = 1L), units = "min"), delta_x = x_utm - lag(x_utm, n = 1L), delta_y = y_utm - lag(y_utm, n = 1L), dist = sqrt(delta_x^2+delta_y^2))
Data Variable
https://osf.io/3bpn6/
af_homing_dataproc.R
576
Calculate daily cumulative distances and time lags; calculate speed; join with relocation info; add a consecutive number per individual to indicate tracking day
daily <- trajectory.df %>% group_by(id, date) %>% filter(!dist == "NA") %>% dplyr::summarize(daily_dist = sum(dist), sex = first(sex), time_lag_min = sum(time_lag_min)) %>% mutate(id_day = paste(id, date), daily_speed = (daily_dist/as.numeric(time_lag_min) * 60)) %>% left_join(relocs.perday) %>% group_by(id) %>% mutate(trans_day = row_number())
Data Variable
https://osf.io/3bpn6/
af_homing_dataproc.R
577
Split up the trajectories into 50m and 200m translocations
trajectory50m <- trajectory.df %>% filter(trans_group == "50m") trajectory200m <- trajectory.df %>% filter(trans_group == "200m")
Data Variable
https://osf.io/3bpn6/
af_homing_dataproc.R
578
Plot daily movement by daily temp & sex
ggplot(data = daily, aes(x = temp, y = daily_dist, group = sex, color = sex)) + geom_point() + stat_smooth(method=lm) + theme_bw()
Visualization
https://osf.io/3bpn6/
af_homing_dataproc.R
579
Plot daily movement by daytime rainfall & sex
ggplot(data = daily, aes(x = rain_daytime, y = daily_dist, group = sex, color = sex)) + geom_point() + stat_smooth(method=lm) + theme_bw()
Visualization
https://osf.io/3bpn6/
af_homing_dataproc.R
580
Plot daily movement by cumulative rainfall & sex
ggplot(data = daily, aes(x = rain_cumul, y = daily_dist, group = sex, color = sex)) + geom_point() + stat_smooth(method=lm) + theme_bw()
Visualization
https://osf.io/3bpn6/
af_homing_dataproc.R
581
x: a matrix containing the data. method: correlation method; "pearson" or "spearman" is supported. removeTriangle: remove the upper or lower triangle. result: if "html" or "latex", the results will be displayed in html or latex format
corstars <-function(x, method=c("pearson", "spearman"), removeTriangle=c("upper", "lower"), result=c("none", "html", "latex")){
Statistical Test
https://osf.io/wcfj3/
funs.R
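Only the signature of corstars() is shown above; a hedged usage example, assuming the function returns a correlation table with significance stars for a numeric data frame:
# Pearson correlations with significance stars, plain-text output (built-in mtcars data)
corstars(mtcars[, c("mpg", "hp", "wt", "qsec")],
         method = "pearson", removeTriangle = "upper", result = "none")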
582
Diagonal matrix Tau used to scale the z_w
Tau_w <- matrix(c(DA.mp$`tau_w[1]`,0,0, 0,DA.mp$`tau_w[2]`,0, 0,0,DA.mp$`tau_w[3]` ),nrow=3,ncol=3)
Data Variable
https://osf.io/kdjqz/
sim_data_LissonEtAl2020.R
583
Run fixed-effect model
life <- rma(yi = corrs$cor, vi = corrs$vi, measure = "COR", method = "FE")
Statistical Modeling
https://osf.io/9jzfr/
20180806funnelplotlifesatisfaction.R
584
Set max Y for graph
y_max<-max(d_dist)+1
Visualization
https://osf.io/ha4q8/
p_curve_d_distribution_power_app_Lakens.R
585
This does the summary. For each group's data frame, return a vector with N, mean, median, and sd
datac <- ddply(data, groupvars, .drop=.drop, .fun = function(xx, col) { c(N = length2(xx[[col]], na.rm=na.rm), mean = mean (xx[[col]], na.rm=na.rm), median = median (xx[[col]], na.rm=na.rm), sd = sd (xx[[col]], na.rm=na.rm) ) }, measurevar )
Data Variable
https://osf.io/gk6jh/
summarySE.R
586
Rename the "mean" and "median" columns
datac <- rename(datac, c("mean" = paste(measurevar, "_mean", sep = ""))) datac <- rename(datac, c("median" = paste(measurevar, "_median", sep = ""))) datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
Data Variable
https://osf.io/gk6jh/
summarySE.R
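A hedged usage sketch for the summary helper the two snippets above build (assumptions: the wrapper is called summarySE() as in the file name, takes data, measurevar, groupvars, and na.rm, and plyr is attached):
library(plyr)

# Per-cell N, mean, median, sd, and se of tooth length by supplement and dose
summary_stats <- summarySE(ToothGrowth, measurevar = "len",
                           groupvars = c("supp", "dose"), na.rm = TRUE)
summary_stats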
587
A function that reads in one line of the csv file, adds tags and text around it, and writes the output line into an output file
add_tags <- function(l,o){ image_no <- strsplit(as.character(l[1]),"\\.")[[1]][1] image_URL <- paste("<img src='", as.character(l[2]),"'>",sep="") write(paste("[[Block:", image_no, "]] \n [[Question:DB]] \n [[ID:", image_no, "-image]] \n", image_URL, " \n [[Question:TE:SingleLine]] \n [[ID:", image_no, "-TE]] \n", TE, " \n [[Question:MC:SingleAnswer:Vertical]] \n [[ID:", image_no, "-MC]] \n", MC, " \n [[Choices]] \n", choices_formatted, "\n [[Question:Matrix]] \n [[ID:", image_no, "-ratings]] \n ",rating_prompt," \n [[AdvancedChoices]] \n ", rating_statements_formatted, "\n [[AdvancedAnswers]] \n [[Answer]] \n","1 - Strongly Disagree", "\n [[Answer]] \n 2 \n [[Answer]] \n 3 \n [[Answer]] \n 4 \n [[Answer]] \n 5 \n [[Answer]] \n 6 \n [[Answer]] \n", "7 - Strongly Agree"," \n [[PageBreak]] \n", sep = ""), o, append = TRUE) }
Data Variable
https://osf.io/t2jka/
batchUploadImages.R
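A hedged sketch of how this row-wise writer might be driven, assuming a data frame `image_list` whose first column is the image file name and whose second column is its URL, and that TE, MC, choices_formatted, rating_prompt, and rating_statements_formatted are defined earlier in the script:
# Hypothetical driver: append one Qualtrics block per image row
outfile <- "qualtrics_import.txt"
file.create(outfile)
invisible(apply(image_list, 1, add_tags, o = outfile))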
588
Generate a plot for a posterior predictive check (evaluate whether the posterior predictive data look more or less similar to the observed data). @param df dataframe with the data. @param mod Bayesian model. @return plot with posterior predictive check
get_pp_check <- function(df, mod) { map(list(df), ~brms::pp_check(mod,
Visualization
https://osf.io/5te7n/
save_get_pp_check.R
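The stored snippet above is cut off; a minimal self-contained sketch of a posterior predictive check along the same lines, assuming a fitted brms model (the type and ndraws arguments here are illustrative choices, not taken from the original script):
library(brms)
library(purrr)

# Hypothetical completion: overlay densities of observed vs. replicated outcomes
get_pp_check <- function(df, mod) {
  purrr::map(list(df), ~ brms::pp_check(mod, type = "dens_overlay", ndraws = 100))
}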
589
Simulate a single between-group study given sample size n, true (fixed) effect size delta, and heterogeneity (random effect) tau. Generate data for the experimental group (y_e) and for the control group (y_c).
y_e = rnorm(n, 0, 1) + delta + rnorm(1, 0, tau) y_c = rnorm(n, 0, 1)
Statistical Modeling
https://osf.io/mg3ny/
1_sim_functions.R
590
calculate pooled variance S, standardized mean difference d, the variance of d, the p-value, and N.
S = sqrt(((n - 1) * v_e + (n - 1) * v_c) / df) d = (m_e - m_c) / S var.d = (n + n)/(n * n) + (d^2 / (2 * (n + n))) se.d = sqrt(var.d) N = n + n
Data Variable
https://osf.io/mg3ny/
1_sim_functions.R
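A hedged sketch that combines the two snippets above into one simulation function, assuming m_e/m_c and v_e/v_c are the sample means and variances of the two groups and df = 2n - 2 (the wrapper name is hypothetical):
# Simulate one two-group study and return the standardized mean difference
sim_study <- function(n, delta, tau) {
  y_e <- rnorm(n, 0, 1) + delta + rnorm(1, 0, tau)   # experimental group
  y_c <- rnorm(n, 0, 1)                              # control group
  m_e <- mean(y_e); m_c <- mean(y_c)
  v_e <- var(y_e);  v_c <- var(y_c)
  df  <- 2 * n - 2
  S   <- sqrt(((n - 1) * v_e + (n - 1) * v_c) / df)  # pooled SD
  d   <- (m_e - m_c) / S
  var.d <- (n + n) / (n * n) + d^2 / (2 * (n + n))
  c(d = d, var.d = var.d, se.d = sqrt(var.d), N = n + n)
}

sim_study(n = 30, delta = 0.4, tau = 0.2)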
591
Box and Whisker Plots to Compare Models
scales <- list(x=list(relation="free"), y=list(relation="free")) bwplot(results, scales=scales)
Visualization
https://osf.io/uxdwh/
code.R
592
Stimuli: specify correlations for rnorm_multi (one of several methods)
stim_cors = stim_i_cor stim = rnorm_multi( n = stim_n, vars = 2, r = stim_cors, mu = 0, # means of random intercepts and slopes are always 0 sd = c(stim_sd, stim_version_sd), varnames = c("stim_i", "stim_version_slope") ) %>% mutate( stim_id = 1:stim_n ) stim_cors = stim_i_cor stim = rnorm_multi( n = stim_n, vars = 2, r = stim_cors, mu = 0, # means of random intercepts and slopes are always 0 sd = c(stim_sd, stim_version_sd), varnames = c("stim_i", "stim_version_slope") ) %>% mutate( stim_id = 1:stim_n ) stim_cors = stim_i_cor stim = rnorm_multi( n = stim_n, vars = 2, r = stim_cors, mu = 0, # means of random intercepts and slopes are always 0 sd = c(stim_sd, stim_version_sd), varnames = c("stim_i", "stim_version_slope") ) %>% mutate( stim_id = 1:stim_n )
Data Variable
https://osf.io/cd5r8/
Sim_Function.R
593
calculate trial-specific effects by adding overall effects and slopes
version_eff = stim_version_eff + stim_version_slope + sub_version_slope, version_eff = stim_version_eff + stim_version_slope + sub_version_slope, version_eff = stim_version_eff + stim_version_slope + sub_version_slope,
Statistical Modeling
https://osf.io/cd5r8/
Sim_Function.R
594
Take the list of csv files from above and read them all into R (purrr::map function). This will download 100 csv files into a list. The reduce function will then bind them all together into a dataframe
large_df <- csv_files %>% purrr::map(function(x) { read.csv(x) }) %>% purrr::reduce(cbind)
Data Variable
https://osf.io/skp56/
CSEP_DataScienceExPhys_Akerman2021.R
595
Restructure this dataframe into long format (pivot_longer). Keep the X columns in; these are the row values we can use to denote a sample. The column names contain the participant (P) and the session number (S). It will display the 1st sample of each session for a given participant first, so we need to reorder to sort by participant, session, then second sample. The names_pattern uses regular expressions to denote that the names will come from the specified area, i.e., after the P (which can contain any values occurring before the underscore) and likewise after the S
large_df <- large_df %>% tidyr::pivot_longer(cols = !contains("X"), names_to = c("participant", "session"), names_pattern = c("P(.*)_S(.*)"), values_to = "heart_rate") %>% dplyr::rename(seconds = "X") %>% dplyr::arrange(participant, session, seconds) %>% dplyr::mutate(participant = as.factor(participant), session = as.factor(session))
Data Variable
https://osf.io/skp56/
CSEP_DataScienceExPhys_Akerman2021.R
596
pool "exp_buc" columns into one column, so that advocacy type is one variable and create new "experienced" column for each level of advocacy type
data <- data %>% pivot_longer(names_to = "advocacytype", values_to = "experienced", graphic_exp_buc:disprotest_exp_buc)
Data Variable
https://osf.io/3aryn/
9Graphingspeciesism_Spanish.R
597
Create variables for residue that is 'present' vs 'absent'. This is used for the tables/descriptives AND for coloring/filling the figures
data <- dplyr::mutate(data, oro_zero = if_else(valleculae_severity_rating == 0, "Absent", "Present")) data <- dplyr::mutate(data, hypo_zero = if_else(piriform_sinus_severity_rating == 0,"Absent", "Present")) data <- dplyr::mutate(data, epi_zero = if_else(epiglottis_severity_rating == 0, "Absent", "Present")) data <- dplyr::mutate(data, lv_zero = if_else(laryngeal_vestibule_severity_rating == 0, "Absent", "Present")) data <- dplyr::mutate(data, vf_zero = if_else(vocal_folds_severity_rating == 0, "Absent", "Present")) data <- dplyr::mutate(data, sg_zero = if_else(subglottis_severity_rating == 0, "Absent", "Present")) data <- dplyr::mutate(data, pas_zero = if_else(pas_max < 2, "Absent", "Present"))
Data Variable
https://osf.io/4anzm/
norms_code.R
598
Diagnostic tests to determine the appropriate number of factors in EFA: parallel analysis (includes scree plot) and Very Simple Structure test; the optimal number of factors is 6
fa.parallel(mainData[, c(2:56)], fm = 'ml', fa = 'fa') vss(mainData[, c(2:56)], n = 8, rotate = 'oblimin', fm = 'mle')
Statistical Test
https://osf.io/2j47e/
Factor analysis.R
599
replace missing values (-9 and 99) with NA
pilot<- na_if(pilot,99) pilot<- na_if(pilot,-9)
Data Variable
https://osf.io/6579b/
03_Supplement.R
600
subset of participants with fewer than 4 missing values
pilot<- subset(pilot, pilot$missings_a < 4) pilot_c <- pilot[,c("SJT_kb_00007","SJT_kb_00028","SJT_kb_00058", "SJT_kb_00054","SJT_kb_00072","SJT_kb_00026", "SJT_kb_00060","SJT_kb_00053","SJT_kb_00039", "SJT_kb_00027","SJT_kb_00015","SJT_kb_00038", "SJT_kb_00035","SJT_kb_00055","SJT_kb_00010")] pilot$missings_c <- apply(pilot_w,1,function(x) sum(is.na(x))) pilot<- subset(pilot, pilot$missings_c < 4) pilot_es <- pilot[,c("SJT_kb_00103","SJT_kb_00197","SJT_kb_00136", "SJT_kb_00134","SJT_kb_00113","SJT_kb_00128", "SJT_kb_00117","SJT_kb_00127","SJT_kb_00104", "SJT_kb_00108","SJT_kb_00094","SJT_kb_00143", "SJT_kb_00116","SJT_kb_00202","SJT_kb_00175")] pilot$missings_es <- apply(pilot_es,1,function(x) sum(is.na(x))) pilot<- subset(pilot, pilot$missings_es < 4)
Data Variable
https://osf.io/6579b/
03_Supplement.R