Get the number of common persons for different groups

I have a dataset with people (IDs) that can be part of more than one group.

Example:

library(data.table)
DT <- data.table(
  ID = rep(1:5, c(3:1, 2:3)),  # times = c(3, 2, 1, 2, 3)
  Group = c("A", "B", "C", "B",
            "C", "A", "A", "C",
            "A", "B", "C")
)
DT
#     ID Group
#  1:  1     A
#  2:  1     B
#  3:  1     C
#  4:  2     B
#  5:  2     C
#  6:  3     A
#  7:  4     A
#  8:  4     C
#  9:  5     A
# 10:  5     B
# 11:  5     C

I want to know, for each pair of groups, how many persons belong to both groups.

The result should look like this:

  Group.1    Group.2    Sum
    A           B        2
    A           C        3
    B           C        3

Where Sum indicates the number of individuals who belong to both groups.
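
As a quick sanity check of the expected A/B value (using the DT defined above), IDs 1 and 5 are the only ones that appear in both groups A and B:

length(intersect(DT[Group == "A", ID], DT[Group == "B", ID]))
# [1] 2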


Here is my version:

# size-1 IDs can't contribute; skip
DT[ , if (.N > 1) 
  # simplify = FALSE returns a list;
  #   transpose turns the 3-length list of 2-length vectors
  #   into a length-2 list of 3-length vectors (efficiently)
  transpose(combn(Group, 2L, simplify = FALSE)), by = ID
  ][ , .(Sum = .N), keyby = .(Group.1 = V1, Group.2 = V2)]

Output:

#    Group.1 Group.2 Sum
# 1:       A       B   2
# 2:       A       C   3
# 3:       B       C   3
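
To illustrate the intermediate step, this is what combn() and transpose() produce for the three groups of ID 1, shown on their own (illustration only, using the same functions as the answer):

combn(c("A", "B", "C"), 2L, simplify = FALSE)
# [[1]]
# [1] "A" "B"
#
# [[2]]
# [1] "A" "C"
#
# [[3]]
# [1] "B" "C"

transpose(combn(c("A", "B", "C"), 2L, simplify = FALSE))
# [[1]]
# [1] "A" "A" "B"
#
# [[2]]
# [1] "B" "C" "C"

The first list element becomes V1 (Group.1) and the second V2 (Group.2) after the by-ID results are stacked.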

Starting from version 1.9.8 (on CRAN 25 Nov 2016), data.table is able to perform non-equi joins. Thus, you can use a non-equi self-join:

library(data.table) # v1.9.8+
setDT(DT)[, Group:= factor(Group)]
DT[DT, on = .(ID, Group < Group), nomatch = 0L, .(ID, x.Group, i.Group)][
  , .N, by = .(x.Group, i.Group)]
   x.Group i.Group N
1:       A       B 2
2:       A       C 3
3:       B       C 3

Explanation

The non-equi join condition ID, Group < Group lets data.table build all pairs of distinct groups within each ID directly, i.e. it takes over the job of combn() (intermediate result):

DT[DT, on = .(ID, Group < Group), nomatch = 0L, .(ID, x.Group, i.Group)]
   ID x.Group i.Group
1:  1       A       B
2:  1       A       C
3:  1       B       C
4:  2       B       C
5:  4       A       C
6:  5       A       B
7:  5       A       C
8:  5       B       C
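
If the exact column names from the question are needed, the same join can feed a final aggregation that renames the pair columns (a small variation, not part of the answer above):

DT[DT, on = .(ID, Group < Group), nomatch = 0L, .(ID, x.Group, i.Group)][
  , .(Sum = .N), by = .(Group.1 = x.Group, Group.2 = i.Group)]
#    Group.1 Group.2 Sum
# 1:       A       B   2
# 2:       A       C   3
# 3:       B       C   3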

Another option: join the dataset with itself on 'ID', drop the rows where the two Group columns are equal, get the count (.N) grouped by the pair, then normalise each pair into 'Group.1' and 'Group.2' with pmin/pmax and take the unique count as 'Sum'.

 library(data.table)#v1.9.6+
 DT[DT, on='ID', allow.cartesian=TRUE][Group!=i.Group, .N ,.(Group, i.Group)][, 
      list(Sum=unique(N)) ,.(Group.1=pmin(Group, i.Group), Group.2=pmax(Group, i.Group))]

#   Group.1 Group.2 Sum
#1:       A       B   2
#2:       A       C   3
#3:       B       C   3
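
For reference, the raw self-join on 'ID' with allow.cartesian=TRUE contains every ordered pair, including same-group pairs and both orderings of each pair, which is why the Group!=i.Group filter and the pmin/pmax normalisation are needed; e.g. for ID 2 (illustration only):

DT[DT, on='ID', allow.cartesian=TRUE][ID == 2]
#    ID Group i.Group
# 1:  2     B       B
# 2:  2     C       B
# 3:  2     B       C
# 4:  2     C       C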

Or, as suggested by @MichaelChirico and @Frank, convert 'Group' to a factor, keep only the rows where as.integer(Group) < as.integer(i.Group), and get the row count (.N) grouped by 'Group' and 'i.Group':

DT[, Group:= factor(Group)]
DT[DT, on='ID', allow.cartesian=TRUE][as.integer(Group) < as.integer(i.Group), .N, 
                       by = .(Group.1= Group, Group.2= i.Group)] 
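
The integer comparison works because a factor stores its levels as integer codes in level order (alphabetical here), so the strict < keeps each unordered pair of distinct groups exactly once:

as.integer(factor(c("A", "B", "C")))
# [1] 1 2 3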

A base R alternative:

tmp <- split(DT, DT$Group)  # one table per group
ans <- apply(combn(LETTERS[1 : 3], 2), 2, FUN = function(ind){
            # number of IDs the two groups have in common
            out <- length(intersect(tmp[[ind[1]]]$ID, tmp[[ind[2]]]$ID))
            c(group1 = ind[1], group2 = ind[2], sum_ = out) 
                }
            )

data.frame(t(ans))

#  group1 group2 sum_
#1      A      B    2
#2      A      C    3
#3      B      C    3

First, split the data into a list with one table per group, then for each pairwise combination of two groups count how many IDs they have in common with length(intersect(...)).
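
For example, for the A/B pair (using tmp from the code above):

intersect(tmp[["A"]]$ID, tmp[["B"]]$ID)
# [1] 1 5
length(intersect(tmp[["A"]]$ID, tmp[["B"]]$ID))
# [1] 2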


Great answers above. Just an alternative using dplyr, in case you or someone else is interested.

library(dplyr)

cmb = combn(unique(DT$Group), 2)  # all pairwise combinations of group labels

data.frame(g1 = cmb[1,],
           g2 = cmb[2,]) %>%
  group_by(g1,g2) %>%
  summarise(l=length(intersect(DT[DT$Group==g1,]$ID,
                               DT[DT$Group==g2,]$ID)))

    #       g1     g2     l
    #    (fctr) (fctr) (int)
    # 1      A      B     2
    # 2      A      C     3
    # 3      B      C     3

Source: https://habr.com/ru/post/1615077/

