Get the number of processor cores sharing a cache (L1, L2, L3)

Below is the C++ code that determines the sizes of the L1, L2 and L3 CPU caches on Windows using GetLogicalProcessorInformation:

typedef BOOL (WINAPI *LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);

LPFN_GLPI glpi = (LPFN_GLPI) GetProcAddress(
    GetModuleHandle(TEXT("kernel32")), "GetLogicalProcessorInformation");

if (glpi)
{
    // The first call fails with ERROR_INSUFFICIENT_BUFFER and sets
    // 'bytes' to the required buffer size.
    DWORD bytes = 0;
    glpi(0, &bytes);
    size_t size = bytes / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    vector<SYSTEM_LOGICAL_PROCESSOR_INFORMATION> info(size);
    glpi(info.data(), &bytes);

    for (size_t i = 0; i < size; i++)
    {
        if (info[i].Relationship == RelationCache)
        {
            if (info[i].Cache.Level == 1)
              l1_cache_Size = info[i].Cache.Size;
            if (info[i].Cache.Level == 2)
              l2_cache_Size = info[i].Cache.Size;
            if (info[i].Cache.Level == 3)
              l3_cache_Size = info[i].Cache.Size;
        }
    }
}

As a next step, I would like to get the number of processor cores that share each cache. On an x64 processor with Hyper-Threading, two logical processors usually share an L2 cache, and all logical processors of the package share the L3 cache.

After reading MSDN, I thought that GetLogicalProcessorInformationEx with its CACHE_RELATIONSHIP and GROUP_AFFINITY structures was what I was looking for, but after checking, these data structures seemed useless for my purpose.

Question:

How can I get the number of processor cores sharing each cache (L1, L2, L3) on Windows in C/C++ (perhaps via cpuid)?


Answer:

Contrary to my initial impression, GetLogicalProcessorInformationEx with CACHE_RELATIONSHIP and GROUP_AFFINITY does provide what I need. GROUP_AFFINITY.Mask is a bit mask of the logical processors that share the cache described by a RelationCache entry. On my Intel processor, GROUP_AFFINITY.Mask has 2 bits set for each L2 cache and 8 bits set for the L3 cache of a 4-core / 8-thread CPU.

Here is example C++ code:

#include <windows.h>
#include <vector>
#include <iostream>

using namespace std;

typedef BOOL (WINAPI *LPFN_GLPI)(LOGICAL_PROCESSOR_RELATIONSHIP,
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, PDWORD);

int main()
{
    LPFN_GLPI glpi = (LPFN_GLPI) GetProcAddress(
        GetModuleHandle(TEXT("kernel32")), "GetLogicalProcessorInformationEx");

    if (!glpi)
        return 1;

    DWORD bytes = 0;
    glpi(RelationAll, 0, &bytes);
    vector<char> buffer(bytes);
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX* info;

    if (!glpi(RelationAll, (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*) &buffer[0], &bytes))
        return 1;

    // Records are variable-sized, so advance by info->Size on each iteration.
    for (size_t i = 0; i < bytes; i += info->Size)
    {
        info = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX*) &buffer[i];

        if (info->Relationship == RelationCache &&
            (info->Cache.Type == CacheData ||
             info->Cache.Type == CacheUnified))
        {
            cout << "info->Cache.Level: " << (int) info->Cache.Level << endl;
            cout << "info->Cache.CacheSize: " << (int) info->Cache.CacheSize << endl;
            cout << "info->Cache.GroupMask.Group: " << info->Cache.GroupMask.Group << endl;
            cout << "info->Cache.GroupMask.Mask: " << info->Cache.GroupMask.Mask << endl << endl;
        }
    }

    return 0;
}

Note:

So Windows does provide this information after all: counting the set bits in GroupMask.Mask of each RelationCache entry gives the number of logical processors that share the corresponding L1, L2 or L3 cache.
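
A minimal sketch of that counting step (my addition, not part of the original answer; the helper name is made up), using std::bitset on the KAFFINITY mask taken from a RelationCache entry:

#include <windows.h>
#include <bitset>

// Hypothetical helper: number of logical processors sharing the cache
// described by one RelationCache entry. KAFFINITY is ULONG_PTR, so a
// 64-bit bitset covers both 32- and 64-bit builds.
static unsigned SharingLogicalProcessors(const CACHE_RELATIONSHIP& cache)
{
    return (unsigned) std::bitset<64>(cache.GroupMask.Mask).count();
}

Called as SharingLogicalProcessors(info->Cache) inside the loop above, this should give 2 for an L2 cache shared by two hyper-threads and 8 for an L3 cache shared by all logical processors of a 4-core / 8-thread CPU.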


@RbMm: yes, CACHE_RELATIONSHIP was what I needed. The number of logical processors sharing a cache = the number of set bits in Cache->GroupMask.Mask.

I also ran the code above on AppVeyor CI. Here is the x64 output:

info->Cache.Level: 1
info->Cache.CacheSize: 32768
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 1

info->Cache.Level: 1
info->Cache.CacheSize: 32768
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 1

info->Cache.Level: 2
info->Cache.CacheSize: 262144
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 1

info->Cache.Level: 3
info->Cache.CacheSize: 31457280
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 1

info->Cache.Level: 1
info->Cache.CacheSize: 32768
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 2

info->Cache.Level: 1
info->Cache.CacheSize: 32768
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 2

info->Cache.Level: 2
info->Cache.CacheSize: 262144
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 2

info->Cache.Level: 3
info->Cache.CacheSize: 31457280
info->Cache.GroupMask.Group: 0
info->Cache.GroupMask.Mask: 2

MSDN describes GroupMask.Mask as the mask of the logical processors that share the cache, but in the output above GroupMask.Mask for the L3 cache covers only a single logical processor. That does not look right; be careful with GroupMask.Mask!


For reference, here is what Windows (Win10) shows on 2 of my machines:

i5 (2 cores, 4 threads):

ProcessorPackage
    [G0 000000000000000F { #3, #2, #1, #0}]
ProcessorCore HP=1 0
    [G0 0000000000000003 { #1, #0}]
Cache L1     8000 40 [G0 0000000000000003 { #1, #0}] A=8 Data
Cache L1     8000 40 [G0 0000000000000003 { #1, #0}] A=8 Instruction
Cache L2    40000 40 [G0 0000000000000003 { #1, #0}] A=8 Unified
Cache L3   300000 40 [G0 000000000000000F { #3, #2, #1, #0}] A=c Unified
ProcessorCore HP=1 0
    [G0 000000000000000C { #3, #2}]
Cache L1     8000 40 [G0 000000000000000C { #3, #2}] A=8 Data
Cache L1     8000 40 [G0 000000000000000C { #3, #2}] A=8 Instruction
Cache L2    40000 40 [G0 000000000000000C { #3, #2}] A=8 Unified
NumaNode #0 [G0 000000000000000F { #3, #2, #1, #0}]
Group:1/1
    4/4 [000000000000000F { #3, #2, #1, #0}]

i7 (4 cores, 8 threads):

ProcessorPackage
    [G0 00000000000000FF { #7, #6, #5, #4, #3, #2, #1, #0}]
ProcessorCore HP=1 0
    [G0 0000000000000003 { #1, #0}]
Cache L1     8000 40 [G0 0000000000000003 { #1, #0}] A=8 Data
Cache L1     8000 40 [G0 0000000000000003 { #1, #0}] A=8 Instruction
Cache L2    40000 40 [G0 0000000000000003 { #1, #0}] A=4 Unified
Cache L3   800000 40 [G0 00000000000000FF { #7, #6, #5, #4, #3, #2, #1, #0}] A=10 Unified
ProcessorCore HP=1 0
    [G0 000000000000000C { #3, #2}]
Cache L1     8000 40 [G0 000000000000000C { #3, #2}] A=8 Data
Cache L1     8000 40 [G0 000000000000000C { #3, #2}] A=8 Instruction
Cache L2    40000 40 [G0 000000000000000C { #3, #2}] A=4 Unified
ProcessorCore HP=1 0
    [G0 0000000000000030 { #5, #4}]
Cache L1     8000 40 [G0 0000000000000030 { #5, #4}] A=8 Data
Cache L1     8000 40 [G0 0000000000000030 { #5, #4}] A=8 Instruction
Cache L2    40000 40 [G0 0000000000000030 { #5, #4}] A=4 Unified
ProcessorCore HP=1 0
    [G0 00000000000000C0 { #7, #6}]
Cache L1     8000 40 [G0 00000000000000C0 { #7, #6}] A=8 Data
Cache L1     8000 40 [G0 00000000000000C0 { #7, #6}] A=8 Instruction
Cache L2    40000 40 [G0 00000000000000C0 { #7, #6}] A=4 Unified
NumaNode #0 [G0 00000000000000FF { #7, #6, #5, #4, #3, #2, #1, #0}]
Group:1/1
    8/8 [00000000000000FF { #7, #6, #5, #4, #3, #2, #1, #0}]

The code that produced the dumps above:

// Formats a KAFFINITY mask as hex plus the indices of the set bits,
// e.g. "000000000000000F { #3, #2, #1, #0}".
void FormatMask(KAFFINITY Mask, PSTR sz)
{
    sz += sprintf(sz, "%p {", (PVOID)Mask);

    ULONG i = sizeof(KAFFINITY) * 8;
    do
    {
        if (_bittest((PLONG)&Mask, --i))
        {
            sz += sprintf(sz, " #%u,", i);
        }
    } while (i);

    *--sz = '}';
}

void DumpCpuInfo()
{
    static PCSTR szCacheType[] = {
        "Unified",
        "Instruction",
        "Data",
        "Trace"
    };

    char szMask[64 * 5 + 19];
    ULONG cb = 0, rcb = 0x400;
    static volatile UCHAR guz;
    PVOID stack = alloca(guz);
    union {
        PVOID Buffer;
        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX plpi;
    };

    do
    {
        // Grow the buffer on the stack (alloca) until the API call succeeds;
        // 'stack' remembers the starting stack pointer so the total size can
        // be recomputed from the new allocation.
        if (cb < rcb) rcb = cb = RtlPointerToOffset(Buffer = alloca(rcb - cb), stack);

        if (GetLogicalProcessorInformationEx(::RelationAll, plpi, &rcb))
        {
            DWORD Size;
            do
            {
                Size = plpi->Size;

                union {
                    PPROCESSOR_RELATIONSHIP Processor;
                    PNUMA_NODE_RELATIONSHIP NumaNode;
                    PCACHE_RELATIONSHIP     Cache;
                    PGROUP_RELATIONSHIP     Group;
                    PVOID pv;
                };

                pv = &plpi->Processor;

                switch (plpi->Relationship)
                {
                case RelationProcessorPackage:
                    DbgPrint("ProcessorPackage\n");
                    goto __0;
                case RelationProcessorCore:
                    DbgPrint("ProcessorCore HP=%x %x\n",
                        Processor->Flags & LTP_PC_SMT ? 1 : 0, Processor->EfficiencyClass);
__0:
                    if (WORD GroupCount = Processor->GroupCount)
                    {
                        PGROUP_AFFINITY GroupMask = Processor->GroupMask;
                        do
                        {
                            FormatMask(GroupMask->Mask, szMask);
                            DbgPrint("\t[G%u %s]\n", GroupMask->Group, szMask);
                        } while (GroupMask++, --GroupCount);
                    }
                    break;

                case RelationNumaNode:
                    FormatMask(NumaNode->GroupMask.Mask, szMask);
                    DbgPrint("NumaNode #%u [G%u %s]\n",
                        NumaNode->NodeNumber, NumaNode->GroupMask.Group, szMask);
                    break;

                case RelationGroup:
                    DbgPrint("Group:%u/%u\n", Group->ActiveGroupCount, Group->MaximumGroupCount);
                    if (WORD ActiveGroupCount = Group->ActiveGroupCount)
                    {
                        PPROCESSOR_GROUP_INFO GroupInfo = Group->GroupInfo;
                        do
                        {
                            FormatMask(GroupInfo->ActiveProcessorMask, szMask);
                            DbgPrint("\t%u/%u [%s]\n",
                                GroupInfo->ActiveProcessorCount,
                                GroupInfo->MaximumProcessorCount, szMask);
                        } while (GroupInfo++, --ActiveGroupCount);
                    }
                    break;

                case RelationCache:
                    FormatMask(Cache->GroupMask.Mask, szMask);
                    DbgPrint("Cache L%u %8x %2x [G%u %s] A=%x %s\n",
                        Cache->Level,
                        Cache->CacheSize, Cache->LineSize,
                        Cache->GroupMask.Group, szMask,
                        Cache->Associativity,
                        szCacheType[Cache->Type % RTL_NUMBER_OF(szCacheType)]
                    );
                    break;
                }
                // Advance to the next variable-sized record.
                Buffer = RtlOffsetToPointer(plpi, Size);
            } while (rcb -= Size);
            break;
        }
    } while (GetLastError() == ERROR_INSUFFICIENT_BUFFER);
}
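
A minimal console wrapper for the dump above (my sketch, not part of the original answer). DbgPrint writes to the debugger output (e.g. DebugView), and RtlOffsetToPointer/RtlPointerToOffset come from the WDK's ntdef.h, so for a plain console build you may need to link against ntdll or substitute printf and equivalent pointer arithmetic:

// Hypothetical driver; assumes FormatMask and DumpCpuInfo from the
// listing above are in the same translation unit.
int main()
{
    DumpCpuInfo();
    return 0;
}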

The logical and physical core counts can also be obtained with boost:

#include <boost/thread.hpp>

// number of logical cores
auto logical = boost::thread::hardware_concurrency();

// number of physical cores
auto physical = boost::thread::physical_concurrency();


Source: https://habr.com/ru/post/1682784/

