The auc function takes an S3 object generated by evalmod and retrieves a data frame with the Area Under the Curve (AUC) scores of ROC and Precision-Recall curves.
auc(curves)
# S3 method for aucs
auc(curves)
The auc function returns a data frame with AUC scores.
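As a quick orientation before the full examples below, the following sketch (using only the P10N10 dataset that ships with the package, as in the first example) shows that the input is the S3 curve object produced by evalmod and that the output is an ordinary data frame with the columns modnames, dsids, curvetypes, and aucs:

library(precrec)

## evalmod() builds the S3 curve object; auc() turns it into a data frame
data(P10N10)
curves <- evalmod(scores = P10N10$scores, labels = P10N10$labels)
class(curves)     ## S3 class of the curve object
str(auc(curves))  ## a plain data frame: modnames, dsids, curvetypes, aucs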
##################################################
### Single model & single test dataset
###
## Load a dataset with 10 positives and 10 negatives
data(P10N10)
## Generate an sscurve object that contains ROC and Precision-Recall curves
sscurves <- evalmod(scores = P10N10$scores, labels = P10N10$labels)
## Show AUCs
auc(sscurves)
#> modnames dsids curvetypes aucs
#> 1 m1 1 ROC 0.7200000
#> 2 m1 1 PRC 0.7397716
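Because the result is a plain data frame, individual values can be pulled out with ordinary subsetting. A short sketch continuing from the sscurves object above, using the column names shown in the output:

## Extract the ROC AUC of model m1 as a single number
ss_aucs <- auc(sscurves)
ss_aucs[ss_aucs$curvetypes == "ROC", "aucs"]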
##################################################
### Multiple models & single test dataset
###
## Create sample datasets with 100 positives and 100 negatives
samps <- create_sim_samples(1, 100, 100, "all")
mdat <- mmdata(samps[["scores"]], samps[["labels"]],
  modnames = samps[["modnames"]]
)
## Generate an mscurve object that contains ROC and Precision-Recall curves
mscurves <- evalmod(mdat)
## Show AUCs
auc(mscurves)
#> modnames dsids curvetypes aucs
#> 1 random 1 ROC 0.4987000
#> 2 random 1 PRC 0.5010844
#> 3 poor_er 1 ROC 0.8268000
#> 4 poor_er 1 PRC 0.8192780
#> 5 good_er 1 ROC 0.7711000
#> 6 good_er 1 PRC 0.8262156
#> 7 excel 1 ROC 0.9773000
#> 8 excel 1 PRC 0.9781720
#> 9 perf 1 ROC 1.0000000
#> 10 perf 1 PRC 1.0000000
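With several models evaluated on one dataset, the same data frame can be used to rank the models. A sketch continuing from the mscurves object above, using only base R:

## Rank models by ROC AUC, best first
ms_aucs <- auc(mscurves)
ms_roc <- subset(ms_aucs, curvetypes == "ROC")
ms_roc[order(ms_roc$aucs, decreasing = TRUE), ]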
##################################################
### Single model & multiple test datasets
###
## Create sample datasets with 100 positives and 100 negatives
samps <- create_sim_samples(4, 100, 100, "good_er")
mdat <- mmdata(samps[["scores"]], samps[["labels"]],
  modnames = samps[["modnames"]],
  dsids = samps[["dsids"]]
)
## Generate an smcurve object that contains ROC and Precision-Recall curves
smcurves <- evalmod(mdat, raw_curves = TRUE)
## Get AUCs
sm_aucs <- auc(smcurves)
## Show AUCs
sm_aucs
#> modnames dsids curvetypes aucs
#> 1 good_er 1 ROC 0.8278000
#> 2 good_er 1 PRC 0.8620813
#> 3 good_er 2 ROC 0.8006000
#> 4 good_er 2 PRC 0.8305047
#> 5 good_er 3 ROC 0.7782000
#> 6 good_er 3 PRC 0.8283688
#> 7 good_er 4 ROC 0.8113000
#> 8 good_er 4 PRC 0.8281613
## Get AUCs of Precision-Recall
sm_aucs_prc <- subset(sm_aucs, curvetypes == "PRC")
## Show AUCs
sm_aucs_prc
#> modnames dsids curvetypes aucs
#> 2 good_er 1 PRC 0.8620813
#> 4 good_er 2 PRC 0.8305047
#> 6 good_er 3 PRC 0.8283688
#> 8 good_er 4 PRC 0.8281613
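When a single model is evaluated on several test datasets, the per-dataset AUCs can be summarised directly from the data frame. A sketch continuing from the sm_aucs object above:

## Mean and standard deviation of the AUCs across the four test datasets,
## separately for ROC and Precision-Recall
aggregate(aucs ~ curvetypes, data = sm_aucs, FUN = mean)
aggregate(aucs ~ curvetypes, data = sm_aucs, FUN = sd)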
##################################################
### Multiple models & multiple test datasets
###
## Create sample datasets with 100 positives and 100 negatives
samps <- create_sim_samples(4, 100, 100, "all")
mdat <- mmdata(samps[["scores"]], samps[["labels"]],
  modnames = samps[["modnames"]],
  dsids = samps[["dsids"]]
)
## Generate an mmcurve object that contains ROC and Precision-Recall curves
mmcurves <- evalmod(mdat, raw_curves = TRUE)
## Get AUCs
mm_aucs <- auc(mmcurves)
## Show AUCs
mm_aucs
#> modnames dsids curvetypes aucs
#> 1 random 1 ROC 0.5126000
#> 2 random 1 PRC 0.5065496
#> 3 poor_er 1 ROC 0.8301000
#> 4 poor_er 1 PRC 0.7762342
#> 5 good_er 1 ROC 0.7848000
#> 6 good_er 1 PRC 0.8385790
#> 7 excel 1 ROC 0.9864000
#> 8 excel 1 PRC 0.9871382
#> 9 perf 1 ROC 1.0000000
#> 10 perf 1 PRC 1.0000000
#> 11 random 2 ROC 0.5323000
#> 12 random 2 PRC 0.5274389
#> 13 poor_er 2 ROC 0.7992000
#> 14 poor_er 2 PRC 0.7906430
#> 15 good_er 2 ROC 0.7880000
#> 16 good_er 2 PRC 0.8273877
#> 17 excel 2 ROC 0.9791000
#> 18 excel 2 PRC 0.9801492
#> 19 perf 2 ROC 1.0000000
#> 20 perf 2 PRC 1.0000000
#> 21 random 3 ROC 0.4756000
#> 22 random 3 PRC 0.4908076
#> 23 poor_er 3 ROC 0.8543000
#> 24 poor_er 3 PRC 0.8429257
#> 25 good_er 3 ROC 0.7871000
#> 26 good_er 3 PRC 0.8244409
#> 27 excel 3 ROC 0.9855000
#> 28 excel 3 PRC 0.9859997
#> 29 perf 3 ROC 1.0000000
#> 30 perf 3 PRC 1.0000000
#> 31 random 4 ROC 0.4767000
#> 32 random 4 PRC 0.4852457
#> 33 poor_er 4 ROC 0.8173000
#> 34 poor_er 4 PRC 0.7730963
#> 35 good_er 4 ROC 0.7866000
#> 36 good_er 4 PRC 0.8130312
#> 37 excel 4 ROC 0.9860000
#> 38 excel 4 PRC 0.9886682
#> 39 perf 4 ROC 1.0000000
#> 40 perf 4 PRC 1.0000000
## Get AUCs of Precision-Recall
mm_aucs_prc <- subset(mm_aucs, curvetypes == "PRC")
## Show AUCs
mm_aucs_prc
#> modnames dsids curvetypes aucs
#> 2 random 1 PRC 0.5065496
#> 4 poor_er 1 PRC 0.7762342
#> 6 good_er 1 PRC 0.8385790
#> 8 excel 1 PRC 0.9871382
#> 10 perf 1 PRC 1.0000000
#> 12 random 2 PRC 0.5274389
#> 14 poor_er 2 PRC 0.7906430
#> 16 good_er 2 PRC 0.8273877
#> 18 excel 2 PRC 0.9801492
#> 20 perf 2 PRC 1.0000000
#> 22 random 3 PRC 0.4908076
#> 24 poor_er 3 PRC 0.8429257
#> 26 good_er 3 PRC 0.8244409
#> 28 excel 3 PRC 0.9859997
#> 30 perf 3 PRC 1.0000000
#> 32 random 4 PRC 0.4852457
#> 34 poor_er 4 PRC 0.7730963
#> 36 good_er 4 PRC 0.8130312
#> 38 excel 4 PRC 0.9886682
#> 40 perf 4 PRC 1.0000000
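For the multiple-model, multiple-dataset case, the long-format data frame can be summarised per model or reshaped into a model-by-dataset table with base R. A sketch continuing from the mm_aucs_prc object above:

## Average Precision-Recall AUC of each model across the four test datasets
aggregate(aucs ~ modnames, data = mm_aucs_prc, FUN = mean)

## Model-by-dataset table of the same AUCs
tapply(mm_aucs_prc$aucs, list(mm_aucs_prc$modnames, mm_aucs_prc$dsids), mean)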