Increase the efficiency of removing duplicate values in a row and shifting the remaining values left in R

I have a huge data set (> 2.5 million). A small subset looks like this (code reproducible)

# Reproducible subset of the data: three rows of character codes,
# some of which are repeated within a row (e.g. "276" in row 1).
temp <- data.frame(
  col1 = c("424", "560", "557"),
  col2 = c("276", "427", "V46"),
  col3 = c("780", "V45", "584"),
  col4 = c("276", "V45", "995"),
  col5 = c("428", "799", "427")
)

> temp
  col1 col2 col3 col4 col5
1  424  276  780  276  428
2  560  427  V45  V45  799
3  557  V46  584  995  427

I am trying to remove the duplicates in each row and shift the remaining values to the left using this code

library(plyr)
# Deduplicate each row and left-align, padding with NA so the result
# stays rectangular.  Faster equivalent of the original
# apply + plyr::ldply(rbind) approach: indexing unique(x) by
# seq_len(nc) pads short rows with NA directly, so the expensive
# ragged-list -> rbind step is avoided entirely.  apply() coerces the
# data frame to a character matrix once, so x is a character vector.
# (Column names come out as V1..Vn instead of 1..n.)
nc <- ncol(temp)
temp <- as.data.frame(t(apply(temp, 1, function(x) unique(x)[seq_len(nc)])),
                      stringsAsFactors = FALSE)

> temp
      1   2   3   4    5
  1 424 276 780 428 <NA>
  2 560 427 V45 799 <NA>
  3 557 V46 584 995  427

This works, but when I extend the above code to my original huge dataset I run into performance problems: because it relies on a row-wise apply, the code takes a long time to execute.

Can I improve this?

+4
source share
2 answers

Since all of your values are character strings, you should really use a matrix rather than a data frame. Transposing it can also be useful, because matrices are stored column-wise, so the row-wise work becomes column-wise work.

# Same example data, built without the redundant list() wrapper;
# stringsAsFactors = FALSE keeps every column a plain character vector.
temp <- data.frame(
  col1 = c("424", "560", "557"),
  col2 = c("276", "427", "V46"),
  col3 = c("780", "V45", "584"),
  col4 = c("276", "V45", "995"),
  col5 = c("428", "799", "427"),
  stringsAsFactors = FALSE
)

# Target width: each deduplicated row is padded back to this many
# entries so the overall result stays rectangular.
p <- ncol(temp)

# Byte-compiled per-row worker: deduplicate one character row and pad
# the tail with NA up to length p.  Assigning a longer length to a
# character vector fills the new positions with NA, which matches an
# explicit c(un, rep(NA_character_, d)).
myf <- compiler::cmpfun(
  function(x) {
    vals <- unique(x)
    if (length(vals) < p) {
      length(vals) <- p
    }
    vals
  }
)

# Benchmark on the 3-row example: the transpose-twice matrix approach
# vs the OP's plyr::ldply version.  t(temp) coerces the data frame to a
# character matrix once, so myf then runs over the columns of that
# transpose (i.e. the original rows); the OP version pays both for
# apply() on a data frame and for the ragged list -> rbind in ldply.
microbenchmark::microbenchmark(
  privefl = as.data.frame(t(apply(t(temp), 2, myf))),
  OP = plyr::ldply(apply(temp, 1, function(x) unique(unlist(x))), rbind)
)

Results on the small example:

Unit: microseconds
    expr     min       lq      mean   median       uq       max neval
 privefl 278.775 301.7855  376.2803 320.8235 409.0580  1705.428   100
      OP 567.152 619.7950 1027.1277 658.2010 792.6225 29558.777   100

With 100,000 rows (`temp <- temp[sample(nrow(temp), size = 1e5, replace = TRUE), ]`):

Unit: milliseconds
    expr       min        lq      mean    median       uq      max neval
 privefl  975.1688  975.1688  988.2184  988.2184 1001.268 1001.268     2
      OP 9196.5199 9196.5199 9518.3922 9518.3922 9840.264 9840.264     2
+2

Another `apply` variant: take the unique values of each row and index them back out to the original row length, so the positions freed by removing duplicates become NA.

# Row-wise deduplication: out-of-range indexing of the unique values
# by the original width pads rows that contained duplicates with NA.
nc <- ncol(temp)
t(apply(temp, 1, function(row) {
  uniq <- unique(row)
  uniq[seq_len(nc)]
}))

#      [,1]  [,2]  [,3]  [,4]  [,5] 
# [1,] "424" "276" "780" "428" NA   
# [2,] "560" "427" "V45" "799" NA   
# [3,] "557" "V46" "584" "995" "427"

A data.table alternative, which may scale better on a large data set: melt the data to long format, deduplicate within each row, then cast back to wide format.

# data.table version: reshape to long format, deduplicate within each
# row, and reshape back to wide.  NOTE(review): setDT converts temp to
# a data.table *by reference*, so temp itself is modified.
setDT(temp)
# Width of the table before the helper row-id column is added below.
nc <- ncol(temp)
# melt produces one (ri, variable, value) row per original cell, keyed
# by the row id ri added via := .  Within each ri group, value is
# replaced by its unique values padded to length nc (out-of-range
# indexing with 1:nc yields NA for the tail).  dcast restores one
# column per original variable; [, ri := NULL] drops the helper column
# and the trailing [] forces the chained result to print.
dcast(melt(temp[, ri := seq_len(.N)], id.var = "ri")[
  , value := unique(value)[1:nc], by = ri], ri ~ variable)[ , ri := NULL][]
#    col1 col2 col3 col4 col5
# 1:  424  276  780  428   NA
# 2:  560  427  V45  799   NA
# 3:  557  V46  584  995  427

Some benchmarking on a larger data set (3 million rows). The OP's original code is excluded here, as the 1e5-row benchmark above already showed it to be much slower.

# Build a large test set: resample the example rows up to 3 million
# rows (with replacement), keeping the same 5 columns.
temp <- temp[sample(nrow(temp), size = 3e6, replace = TRUE), ]

# Compare the three approaches on the large data; times = 20
# repetitions, timings reported relative to the fastest method.
microbenchmark::microbenchmark(
  # privefl's answer: transpose to a character matrix and run the
  # byte-compiled pad-with-NA worker over its columns (the original
  # rows), then transpose back.
  privefl = {
    p <- ncol(temp)
    myf <- compiler::cmpfun(
      function(x) {
        un <- unique(x)
        d <- p - length(un)
        if (d > 0) {
          un <- c(un, rep(NA_character_, d))
        }
        un
      }
    )
    as.data.frame(t(apply(t(temp), 2, myf)))},

  # h1: plain row-wise apply; unique(x)[1:nc] pads with NA through
  # out-of-range indexing.
  h1 = {nc <- ncol(temp)
  as.data.frame(t(apply(temp, 1, function(x) unique(x)[1:nc])))},

  # h2: data.table reshape — melt to long, deduplicate per row id,
  # dcast back to wide.  as.data.table copies temp first, so the
  # in-place := operations do not modify the benchmarked input.
  h2 = {d <- as.data.table(temp)
    nc <- ncol(d)
    dcast(melt(d[, ri := seq_len(.N)], id.var = "ri")[
      , value := unique(value)[1:nc], by = ri], ri ~ variable)[ , ri := NULL]},
  times = 20, unit = "relative")

# Unit: relative
#    expr      min       lq     mean   median       uq      max neval cld
# privefl 1.312071 1.342116 1.341450 1.354268 1.403343 1.243641    20   b
#      h1 1.227693 1.270512 1.270115 1.332642 1.301049 1.156123    20   b
#      h2 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000    20  a
+2

Source: https://habr.com/ru/post/1689842/


All Articles