N <- 10000
d <- data.frame(
  ID = seq(1, N),
  v1 = sample(c("M","F","M","L"), N, replace = TRUE),
  v2 = sample(c("D","M","D","D"), N, replace = TRUE),
  v3 = sample(c("F","G","F","E"), N, replace = TRUE),
  v4 = sample(c("A","B","A","B"), N, replace = TRUE)
)
Using data.table (fastest)
dt <- data.table::as.data.table(d)
dt[, .N, by = c('v1','v2','v3','v4')]  # .N is the row count per group
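Since data.table queries chain, the grouped count can also be sorted by frequency in the same expression; a minimal sketch using the same dt as above:

dt[, .N, by = c('v1','v2','v3','v4')][order(-N)]  # most frequent combinations first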
With dplyr
dplyr::count_(d, vars = c('v1','v2','v3','v4'))
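Note that count_() is deprecated in recent dplyr releases; a sketch of the modern equivalent, which returns the same counts:

dplyr::count(d, v1, v2, v3, v4)  # counts go into a column named n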
With plyr
plyr::count(d, vars = c('v1','v2','v3','v4'))
plyr::ddply(d, .variables = c('v1','v2','v3','v4'), nrow)
With aggregate (slowest)
# ID ~ . groups by every other column; length() counts the rows per group
aggregate(ID ~ ., d, FUN = length)
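A base-R alternative not included in the benchmark below is table(), which tabulates every possible combination of levels (including ones that never occur, hence the Freq > 0 filter); a sketch:

# contingency table over the four grouping columns, flattened to a data frame
tab <- as.data.frame(table(d[, c('v1','v2','v3','v4')]))
subset(tab, Freq > 0)  # keep only combinations that actually appear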
Benchmark
microbenchmark::microbenchmark(
  dt[, .N, by = c('v1','v2','v3','v4')],
  plyr::count(d, vars = c('v1','v2','v3','v4')),
  plyr::ddply(d, .variables = c('v1','v2','v3','v4'), nrow),
  dplyr::count_(d, vars = c('v1','v2','v3','v4')),
  aggregate(ID ~ ., d, FUN = length),
  times = 1000
)

Unit: microseconds
                                                         expr      min       lq      mean   median        uq        max neval cld
                    dt[, .N, by = c("v1", "v2", "v3", "v4")]  887.807 1107.543  1263.777 1174.258  1289.724   4263.156  1000  a
            plyr::count(d, vars = c("v1", "v2", "v3", "v4")) 3912.791 4270.387  5379.080 4498.053  5791.743 157146.103  1000  c
 plyr::ddply(d, .variables = c("v1", "v2", "v3", "v4"), nrow) 7737.874 8553.370 10630.849 9018.266 11126.517 187301.696  1000  d
           dplyr::count_(d, vars = c("v1", "v2", "v3", "v4")) 2126.913 2432.957  2763.499 2568.251  2789.386  12549.669  1000  b
                          aggregate(ID ~ ., d, FUN = length) 7395.440 8121.828 10546.659 8776.371 10858.263 210139.759  1000  d
It is best to use data.table instead of data.frame, since it is the fastest option and counts groups without needing any additional function or package. Note also that aggregate performs much more slowly on large data sets.
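For example, the as.data.table() conversion step disappears entirely if the data are built as a data.table from the start; a minimal sketch mirroring the construction above:

library(data.table)
dt <- data.table(
  ID = seq_len(N),
  v1 = sample(c("M","F","M","L"), N, replace = TRUE),
  v2 = sample(c("D","M","D","D"), N, replace = TRUE),
  v3 = sample(c("F","G","F","E"), N, replace = TRUE),
  v4 = sample(c("A","B","A","B"), N, replace = TRUE)
)
dt[, .N, by = .(v1, v2, v3, v4)]  # same grouped count, no conversion needed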
Final note: feel free to add new methods.