How to work with reserved CMA memory?

I would like to allocate a piece of physically contiguous reserved memory (at a predefined physical address) for my DMA-capable device. As I see it, CMA gives three options:

1. Reserve memory through the kernel configuration.
2. Reserve memory through the kernel cmdline.
3. Reserve memory through a device tree memory node.

In the first case, you can only set the size and the number of areas:

CONFIG_DMA_CMA=y
CONFIG_CMA_AREAS=7
CONFIG_CMA_SIZE_MBYTES=8

Therefore, to allocate contiguous memory, I could use the following in my driver:

start_cma_virt = dma_alloc_coherent(dev->cmadev, (size_t)size_cma, &start_cma_dma, GFP_KERNEL);
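
In full, something like this (a sketch only; cmadev_priv and its field names are placeholders for my driver, not kernel API):

#include <linux/dma-mapping.h>

/* placeholder private struct for the hypothetical cmadev driver */
struct cmadev_priv {
    struct device *dev; /* the DMA-capable device */
    void *vaddr;        /* kernel virtual address of the buffer */
    dma_addr_t dma;     /* bus address to program into the device */
    size_t size;
};

static int cmadev_alloc_buf(struct cmadev_priv *p, size_t size)
{
    /* one coherent, physically contiguous buffer from the CMA pool */
    p->vaddr = dma_alloc_coherent(p->dev, size, &p->dma, GFP_KERNEL);
    if (!p->vaddr)
        return -ENOMEM;
    p->size = size;
    return 0;
}

static void cmadev_free_buf(struct cmadev_priv *p)
{
    dma_free_coherent(p->dev, p->size, p->vaddr, p->dma);
}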

I could call dma_alloc_coherent() up to 7 times and allocate up to 8 MB in total. But unfortunately,

dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

called from arm_memblock_init() in arch/arm/mm/init.c:

void __init arm_memblock_init(struct meminfo *mi, const struct machine_desc *mdesc)

does not allow setting a predefined physical address for the contiguous region. Of course, I could use the kernel cmdline:

cma=cmadevlabel=8M@32M cma_map=mydevname=cmadevlabel
//struct device *dev = cmadev->dev; /*dev->name is mydevname*/

With that, dma_alloc_coherent() should return memory between 32M and 32M + 8M (0x2000000 + 0x800000), i.e. addresses up to 0x27FFFFF. But as far as I know, cma_map= never made it into the mainline kernel, so can the cmdline approach actually work? That leaves the device tree:

cmadev_region: mycma {
    /*no-map;*/ /*DMA coherent memory*/
    /*reusable;*/
    reg = <0x02000000 0x00100000>;      
};

and a phandle to it in my device node:

memory-region = <&cmadev_region>;

Then, in the driver, I could use the following calls (a rough sketch follows the list):

of_find_node_by_name(); //find the needed node
of_parse_phandle(); //resolve a phandle property to a device_node pointer
of_get_address(); //get the raw __be32 address cells from the DT
of_translate_address(); //DT holds local (bus) addresses, so translate to a CPU physical address
request_mem_region(); //reserve the I/O memory region (visible in /proc/iomem)
ioremap(); //map the reserved memory and return a kernel virtual address
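
Roughly like this (a sketch; the node name "mydev" is an assumption, and of_address_to_resource() is used as a shortcut for of_get_address() plus of_translate_address()):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *map_reserved_region(void)
{
    struct device_node *np, *rmem_np;
    struct resource res;
    void __iomem *base;

    np = of_find_node_by_name(NULL, "mydev"); /* hypothetical node name */
    if (!np)
        return NULL;

    rmem_np = of_parse_phandle(np, "memory-region", 0); /* -> cmadev_region */
    of_node_put(np);
    if (!rmem_np)
        return NULL;

    /* of_address_to_resource() wraps of_get_address() and
       of_translate_address() */
    if (of_address_to_resource(rmem_np, 0, &res)) {
        of_node_put(rmem_np);
        return NULL;
    }
    of_node_put(rmem_np);

    if (!request_mem_region(res.start, resource_size(&res), "mycma"))
        return NULL;

    base = ioremap(res.start, resource_size(&res)); /* kernel virtual address */
    return base;
}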

But memory for DMA is supposed to come from the DMA API (dma_alloc_coherent()), not from the I/O mapping API (ioremap()). So I am back to:

start_cma_virt = dma_alloc_coherent(dev->cmadev, (size_t)size_cma, &start_cma_dma, GFP_KERNEL);

But how do I attach the region reserved in the device tree (reg = <0x02000000 0x00100000>;) to dev->cmadev? With the cmdline there was cma_map, but here all I can do is resolve the phandle with of_parse_phandle() and get a DT node. How do I make dma_alloc_coherent() take its DMA memory from cmadev_region: mycma?


To make dma_alloc_coherent() allocate from your reserved region, declare that region as the device's coherent memory. Something like this:

Device tree:

cmadev_region: mycma {
    compatible = "compatible-name"
    no-map;
    reg = <0x02000000 0x00100000>;      
};

Driver:

#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>

struct device *cma_dev;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
    int ret;

    ret = dma_declare_coherent_memory(dev, rmem->base, rmem->base,
                       rmem->size, DMA_MEMORY_EXCLUSIVE);
    if (ret) {
        pr_err("Failed to declare coherent memory for %s\n", dev_name(dev));
        return ret;
    }

    /* remember the device so the driver can allocate from it later */
    cma_dev = dev;
    return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                struct device *dev)
{
    if (dev)
        dma_release_declared_memory(dev);
}

static const struct reserved_mem_ops rmem_dma_ops = {
    .device_init    = rmem_dma_device_init,
    .device_release = rmem_dma_device_release,
};

int __init cma_setup(struct reserved_mem *rmem)
{
    rmem->ops = &rmem_dma_ops;
    return 0;
}
RESERVEDMEM_OF_DECLARE(some_name, "compatible-name", cma_setup);

After that, pass cma_dev to dma_alloc_coherent() and the allocation will come from your reserved region.
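
For completeness, a sketch of the driver side (the probe function and the 1 MB size are placeholders; of_reserved_mem_device_init() is what resolves the memory-region phandle and fires the device_init callback above):

#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

extern struct device *cma_dev; /* set by rmem_dma_device_init() above */

static int mydrv_probe(struct platform_device *pdev)
{
    dma_addr_t dma_handle;
    void *vaddr;
    int ret;

    /* resolves memory-region = <&cmadev_region>; and calls
       rmem_dma_device_init(rmem, &pdev->dev) */
    ret = of_reserved_mem_device_init(&pdev->dev);
    if (ret)
        return ret;

    /* served from the coherent region declared at 0x02000000 */
    vaddr = dma_alloc_coherent(cma_dev, SZ_1M, &dma_handle, GFP_KERNEL);
    if (!vaddr) {
        of_reserved_mem_device_release(&pdev->dev);
        return -ENOMEM;
    }
    return 0;
}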


Source: https://habr.com/ru/post/1629902/

