I have a large RDS file that I want to work with in parallel in R. When I load the file with readRDS, it takes about 7.3 GB of RAM.
If I try to use many cores, R fails because it runs out of memory. Is there a way to tell mclapply to use shared memory instead of creating a copy of the object for each worker?
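My understanding is that mclapply() forks the parent process on Linux, so the children should be able to share the already-loaded object copy-on-write as long as they only read it. As a point of reference, here is a minimal sketch of that pattern, with a small made-up list standing in for my real 7.3 GB object:

library(parallel)

# Small stand-in for the real object (hypothetical data, just for illustration)
big_object <- replicate(100, matrix(rnorm(1e4), 100, 100), simplify = FALSE)

# Workers only read big_object, so the forked children should share the
# parent's memory pages instead of each getting a full copy
res <- mclapply(seq_along(big_object),
                function(i) sum(big_object[[i]]),
                mc.cores = 4)

In my real code below, though, memory still runs out once many workers are active.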
This is the code I have:
results <- readRDS('ResultsICC.RDS')

rand <- 0
Icc  <- c(.5, 1, 1.5)
n    <- c(.1, .5, 1)
phi  <- c(0, .5, 1)
parameterSettings <- expand.grid(rand = rand, Icc = Icc, n = n, phi = phi)

rr <- list()
Ns <- results[[1]][[1]][[2]][, c('Country', 'n')]

EstimatedBestPFiveArmRaw <- matrix(NA, 26, 1000)
EstimatedBestP           <- matrix(NA, 26, 1000)

outterloop <- function(dataIN){
  for(k in 1:1000){
    # Weighted mean of the five-arm raw estimates, recycled across the 26 cohorts
    best <- dataIN[[k]][[2]]
    EstimatedBestPFiveArmRaw[, k] <- rep(weighted.mean(best$estimatedBestPFiveArmRaw, best$n), 26)

    pHat <- dataIN[[k]][[3]]
    best <- Ns
    best$estimatedBest <- best$estimatedBestP <- NA

    for(j in 1:26){
      # Per country, pick the treatment with the highest pHat for this cohort
      best$estimatedBest <- sapply(split(pHat[, paste0('cohort', j + 1, 'pHat')], pHat$Country),
                                   which.max)
      # Look up p for the selected treatment in each country
      for(i in 1:nrow(best))
        best$estimatedBestP[i] <- pHat$p[pHat$Country == best$Country[i] &
                                           pHat$treatNum == best$estimatedBest[i]]
      EstimatedBestP[j, k] <- weighted.mean(best$estimatedBestP, best$n)
    }
    # Percent difference relative to the five-arm raw benchmark
    rr <- (EstimatedBestP / EstimatedBestPFiveArmRaw - 1) * 100
  }
  return(rr)
}

library(parallel)
rr <- mclapply(X = results, FUN = outterloop, mc.cores = 27, mc.preschedule = TRUE)
I run this on a Linux box with 32 cores and 64 GB of RAM.
Thanks!
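The only fallback I can think of is to run fewer workers at a time so the per-worker copies fit in RAM, something like the sketch below (same results and outterloop as above, batch size picked arbitrarily), but that throws away most of the parallelism, which is why I am hoping for a shared-memory option:

# Hypothetical fallback: at most 8 worker copies alive at once
batch_size <- 8
idx <- split(seq_along(results), ceiling(seq_along(results) / batch_size))
rr <- unlist(lapply(idx, function(ix)
  mclapply(results[ix], outterloop, mc.cores = batch_size, mc.preschedule = TRUE)),
  recursive = FALSE)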