Select rows with min value by group

Building on DWin's solution, tapply can be avoided by using ave:

df[ df$v1 == ave(df$v1, df$f, FUN=min), ]
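
To see how this works, here is a minimal sketch on a hypothetical toy data frame: ave() returns the group minimum of v1 for every row, so comparing it to v1 keeps the minimum row(s) of each group (including ties).

toy <- data.frame(f = c("a", "a", "b", "b"), v1 = c(2, 1, 5, 3))
ave(toy$v1, toy$f, FUN = min)
## [1] 1 1 3 3
toy[toy$v1 == ave(toy$v1, toy$f, FUN = min), ]
##   f v1
## 2 a  1
## 4 b  3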

The ave() approach gives a further speed-up, as shown in the timings below. Mind you, the gain also depends on the number of levels. I mention it because ave is far too often forgotten, although it is one of the more powerful functions in R.

f <- rep(letters[1:20],10000)
v1 <- rnorm(20*10000)
v2 <- 1:(20*10000)
df <- data.frame(f,v1,v2)

> system.time(df[ df$v1 == ave(df$v1, df$f, FUN=min), ])
   user  system elapsed 
   0.05    0.00    0.05 

> system.time(df[ df$v1 %in% tapply(df$v1, df$f, min), ])
   user  system elapsed 
   0.25    0.03    0.29 

> system.time(lapply(split(df, df$f), FUN = function(x) {
+             vec <- which(x[3] == min(x[3]))
+             return(x[vec, ])
+         })
+  .... [TRUNCATED] 
   user  system elapsed 
   0.56    0.00    0.58 

> system.time(df[tapply(1:nrow(df),df$f,function(i) i[which.min(df$v1[i])]),]
+ )
   user  system elapsed 
   0.17    0.00    0.19 

> system.time( ddply(df, .var = "f", .fun = function(x) {
+     return(subset(x, v1 %in% min(v1)))
+     }
+ )
+ )
   user  system elapsed 
   0.28    0.00    0.28 

A data.table solution. Within each group, .SD is the subset of the data for that group, so .SD[which.min(v1)] returns the row with the smallest v1. (The output shown is for a small four-group example rather than the benchmark df.)

library(data.table)
DT <- as.data.table(df)
DT[,.SD[which.min(v1)], by = f]

##   f  v1 v2
## 1: a 1.3  1
## 2: b 2.0  3
## 3: c 1.1  6
## 4: d 3.1  8

Or, more efficiently:

DT[DT[,.I[which.min(v1)],by=f][['V1']]]
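
To unpack that idiom: .I holds the original row numbers of DT, so the inner call returns one row number per group (in a column data.table names V1 by default), and the outer call subsets DT by those row numbers, which avoids building .SD for every group and tends to be faster. Split into two steps (the intermediate name idx is only for illustration):

idx <- DT[, .I[which.min(v1)], by = f]  # columns f and V1: the row numbers of the group minima
DT[idx[["V1"]]]                         # same rows as the .SD version above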

Some benchmarking:

f <- rep(letters[1:20],100000)
v1 <- rnorm(20*100000)
v2 <- 1:(20*100000)
df <- data.frame(f,v1,v2)
DT <- as.data.table(df)
# f1: order by group then value, keep the first row of each group
f1 <- function() {
  df2 <- df[order(df$f, df$v1), ]
  df2[!duplicated(df2$f), ]
}

# f2: order by value only, keep the first row seen for each group
f2 <- function() {
  df2 <- df[order(df$v1), ]
  df2[!duplicated(df2$f), ]
}

# f3: the ave() approach
f3 <- function() { df[df$v1 == ave(df$v1, df$f, FUN = min), ] }

# f4: data.table, .SD approach
f4 <- function() { DT[, .SD[which.min(v1)], by = f] }

# f5: data.table, .I approach
f5 <- function() { DT[DT[, .I[which.min(v1)], by = f][["V1"]]] }

library(microbenchmark)
microbenchmark(f1(), f2(), f3(), f4(), f5(), times = 5)
# Unit: milliseconds
# expr       min        lq    median        uq       max neval
# f1() 3254.6620 3265.4760 3286.5440 3411.4054 3475.4198     5
# f2() 1630.8572 1639.3472 1651.5422 1721.4670 1738.6684     5
# f3()  172.2639  174.0448  177.4985  179.9604  184.7365     5
# f4()  206.1837  209.8161  209.8584  210.4896  210.7893     5
# f5()  105.5960  106.5006  107.9486  109.7216  111.1286     5

The .I approach is the winner here (data.table FR #2330 will hopefully make the more elegant .SD approach similarly fast once it is implemented).
