The auc function takes an S3 object generated by evalmod and returns a data frame with the Area Under the Curve (AUC) scores of the ROC and Precision-Recall curves.

Usage

auc(curves)

# S3 method for class 'aucs'
auc(curves)

Arguments

curves

An S3 object generated by evalmod. The auc function accepts the following S3 objects.

S3 object    # of models    # of test datasets
sscurves     single         single
mscurves     multiple       single
smcurves     single         multiple
mmcurves     multiple       multiple

See the Value section of evalmod for more details.

Value

The auc function returns a data frame with one row per combination of model, test dataset, and curve type, containing the columns modnames, dsids, curvetypes, and aucs (see the Examples below).
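
Because the result is a plain data frame, standard base R functions such as subset and aggregate can be applied to it directly. A minimal sketch, assuming curves is any of the S3 objects listed above:

aucs_df <- auc(curves)
subset(aucs_df, curvetypes == "PRC")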

See also

evalmod for generating S3 objects with performance evaluation measures. pauc for retrieving a dataset of pAUCs.
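
A minimal, hedged sketch of the pauc workflow, assuming that partial curves are first computed with the part function (see part and pauc for the authoritative interface; xlim = c(0, 0.25) is only an illustrative range):

curves_part <- part(curves, xlim = c(0, 0.25))
pauc(curves_part)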

Examples


##################################################
### Single model & single test dataset
###

## Load a dataset with 10 positives and 10 negatives
data(P10N10)

## Generate an sscurve object that contains ROC and Precision-Recall curves
sscurves <- evalmod(scores = P10N10$scores, labels = P10N10$labels)

## Show the AUCs
auc(sscurves)
#>   modnames dsids curvetypes      aucs
#> 1       m1     1        ROC 0.7200000
#> 2       m1     1        PRC 0.7397716
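
## Not in the original example: a minimal sketch of pulling out the ROC AUC
## as a single numeric value with base R subsetting
ss_aucs <- auc(sscurves)
ss_aucs[ss_aucs$curvetypes == "ROC", "aucs"]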


##################################################
### Multiple models & single test dataset
###

## Create sample datasets with 100 positives and 100 negatives
samps <- create_sim_samples(1, 100, 100, "all")
mdat <- mmdata(samps[["scores"]], samps[["labels"]],
  modnames = samps[["modnames"]]
)

## Generate an mscurve object that contains ROC and Precision-Recall curves
mscurves <- evalmod(mdat)

## Show the AUCs
auc(mscurves)
#>    modnames dsids curvetypes      aucs
#> 1    random     1        ROC 0.4971000
#> 2    random     1        PRC 0.4992116
#> 3   poor_er     1        ROC 0.8328000
#> 4   poor_er     1        PRC 0.7860641
#> 5   good_er     1        ROC 0.8180000
#> 6   good_er     1        PRC 0.8574152
#> 7     excel     1        ROC 0.9780000
#> 8     excel     1        PRC 0.9782574
#> 9      perf     1        ROC 1.0000000
#> 10     perf     1        PRC 1.0000000
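
## Not in the original example: a minimal sketch that reshapes the AUCs into
## a model-by-curvetype matrix for easier comparison (base R only)
ms_aucs <- auc(mscurves)
with(ms_aucs, tapply(aucs, list(modnames, curvetypes), mean))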


##################################################
### Single model & multiple test datasets
###

## Create sample datasets with 100 positives and 100 negatives
samps <- create_sim_samples(4, 100, 100, "good_er")
mdat <- mmdata(samps[["scores"]], samps[["labels"]],
  modnames = samps[["modnames"]],
  dsids = samps[["dsids"]]
)

## Generate an smcurve object that contains ROC and Precision-Recall curves
smcurves <- evalmod(mdat, raw_curves = TRUE)

## Get AUCs
sm_aucs <- auc(smcurves)

## Show the AUCs
sm_aucs
#>   modnames dsids curvetypes      aucs
#> 1  good_er     1        ROC 0.7865000
#> 2  good_er     1        PRC 0.8404735
#> 3  good_er     2        ROC 0.8313000
#> 4  good_er     2        PRC 0.8628264
#> 5  good_er     3        ROC 0.8244000
#> 6  good_er     3        PRC 0.8578336
#> 7  good_er     4        ROC 0.8204000
#> 8  good_er     4        PRC 0.8519919

## Get the AUCs of the Precision-Recall curves
sm_aucs_prc <- subset(sm_aucs, curvetypes == "PRC")

## Show the Precision-Recall AUCs
sm_aucs_prc
#>   modnames dsids curvetypes      aucs
#> 2  good_er     1        PRC 0.8404735
#> 4  good_er     2        PRC 0.8628264
#> 6  good_er     3        PRC 0.8578336
#> 8  good_er     4        PRC 0.8519919
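
## Not in the original example: a minimal sketch averaging the
## Precision-Recall AUC over the four test datasets
mean(sm_aucs_prc$aucs)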

##################################################
### Multiple models & multiple test datasets
###

## Create sample datasets with 100 positives and 100 negatives
samps <- create_sim_samples(4, 100, 100, "all")
mdat <- mmdata(samps[["scores"]], samps[["labels"]],
  modnames = samps[["modnames"]],
  dsids = samps[["dsids"]]
)

## Generate an mmcurve object that contains ROC and Precision-Recall curves
mmcurves <- evalmod(mdat, raw_curves = TRUE)

## Get AUCs
mm_aucs <- auc(mmcurves)

## Show the AUCs
mm_aucs
#>    modnames dsids curvetypes      aucs
#> 1    random     1        ROC 0.4509000
#> 2    random     1        PRC 0.4468330
#> 3   poor_er     1        ROC 0.8299000
#> 4   poor_er     1        PRC 0.7817533
#> 5   good_er     1        ROC 0.8285000
#> 6   good_er     1        PRC 0.8577692
#> 7     excel     1        ROC 0.9820000
#> 8     excel     1        PRC 0.9842602
#> 9      perf     1        ROC 1.0000000
#> 10     perf     1        PRC 1.0000000
#> 11   random     2        ROC 0.4974000
#> 12   random     2        PRC 0.5102109
#> 13  poor_er     2        ROC 0.7718000
#> 14  poor_er     2        PRC 0.7117766
#> 15  good_er     2        ROC 0.7925000
#> 16  good_er     2        PRC 0.8071713
#> 17    excel     2        ROC 0.9778000
#> 18    excel     2        PRC 0.9778305
#> 19     perf     2        ROC 1.0000000
#> 20     perf     2        PRC 1.0000000
#> 21   random     3        ROC 0.4797000
#> 22   random     3        PRC 0.5184681
#> 23  poor_er     3        ROC 0.8219000
#> 24  poor_er     3        PRC 0.7939097
#> 25  good_er     3        ROC 0.7832000
#> 26  good_er     3        PRC 0.8267456
#> 27    excel     3        ROC 0.9797000
#> 28    excel     3        PRC 0.9824702
#> 29     perf     3        ROC 1.0000000
#> 30     perf     3        PRC 1.0000000
#> 31   random     4        ROC 0.4924000
#> 32   random     4        PRC 0.4723165
#> 33  poor_er     4        ROC 0.7803000
#> 34  poor_er     4        PRC 0.7185701
#> 35  good_er     4        ROC 0.8343000
#> 36  good_er     4        PRC 0.8609178
#> 37    excel     4        ROC 0.9891000
#> 38    excel     4        PRC 0.9890191
#> 39     perf     4        ROC 1.0000000
#> 40     perf     4        PRC 1.0000000

## Get the AUCs of the Precision-Recall curves
mm_aucs_prc <- subset(mm_aucs, curvetypes == "PRC")

## Show the Precision-Recall AUCs
mm_aucs_prc
#>    modnames dsids curvetypes      aucs
#> 2    random     1        PRC 0.4468330
#> 4   poor_er     1        PRC 0.7817533
#> 6   good_er     1        PRC 0.8577692
#> 8     excel     1        PRC 0.9842602
#> 10     perf     1        PRC 1.0000000
#> 12   random     2        PRC 0.5102109
#> 14  poor_er     2        PRC 0.7117766
#> 16  good_er     2        PRC 0.8071713
#> 18    excel     2        PRC 0.9778305
#> 20     perf     2        PRC 1.0000000
#> 22   random     3        PRC 0.5184681
#> 24  poor_er     3        PRC 0.7939097
#> 26  good_er     3        PRC 0.8267456
#> 28    excel     3        PRC 0.9824702
#> 30     perf     3        PRC 1.0000000
#> 32   random     4        PRC 0.4723165
#> 34  poor_er     4        PRC 0.7185701
#> 36  good_er     4        PRC 0.8609178
#> 38    excel     4        PRC 0.9890191
#> 40     perf     4        PRC 1.0000000
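
## Not in the original example: a minimal sketch averaging the
## Precision-Recall AUC of each model over the four test datasets
aggregate(aucs ~ modnames, data = mm_aucs_prc, FUN = mean)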