On Linux, DMA can go through a hardware IOMMU such as the AMD IOMMU or Intel VT-d, or through the software bounce-buffer mechanism SWIOTLB.

This article walks through how the Linux kernel performs DMA when an AMD IOMMU is present. It covers:

1. struct iommu_ops amd_iommu_ops

2. struct dma_map_ops iommu_dma_ops

3. How the DMA-layer struct dma_map_ops relates to struct iommu_ops

    Consistent and streaming mappings

4. struct io_pgtable_ops, and how it relates to struct iommu_ops amd_iommu_ops

1. struct iommu_ops amd_iommu_ops is set in two places

const struct iommu_ops amd_iommu_ops = {
    .capable = amd_iommu_capable,
    .domain_alloc = amd_iommu_domain_alloc,  // allocate an iommu_domain
    .domain_free  = amd_iommu_domain_free,
    .attach_dev = amd_iommu_attach_device, // for a standalone device (the only one in its group), attach the device's group to the domain
    .detach_dev = amd_iommu_detach_device,
    .map = amd_iommu_map, // map the IOVA range of length size starting at iova to the physical range starting at paddr; only valid for UNMANAGED and DMA domains
    .iotlb_sync_map    = amd_iommu_iotlb_sync_map,
    .unmap = amd_iommu_unmap,
    .iova_to_phys = amd_iommu_iova_to_phys, // translate an IOVA into a physical address
    .probe_device = amd_iommu_probe_device,
    .release_device = amd_iommu_release_device,
    .probe_finalize = amd_iommu_probe_finalize,
    .device_group = amd_iommu_device_group,
    .get_resv_regions = amd_iommu_get_resv_regions,
    .put_resv_regions = generic_iommu_put_resv_regions,
    .is_attach_deferred = amd_iommu_is_attach_deferred,
    .pgsize_bitmap    = AMD_IOMMU_PGSIZES,
    .flush_iotlb_all = amd_iommu_flush_iotlb_all,
    .iotlb_sync = amd_iommu_iotlb_sync,
    .def_domain_type = amd_iommu_def_domain_type,
};
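
These callbacks are invoked through the generic IOMMU API. Below is a hedged sketch, not taken from the kernel or AMD driver sources: example_iommu_api_usage is a made-up name, and the iommu_map() signature shown matches the pre-gfp-argument kernels this article is based on. It illustrates which amd_iommu_ops callback each IOMMU-API call lands in:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

static int example_iommu_api_usage(struct device *dev, phys_addr_t paddr)
{
    struct iommu_domain *domain;
    int ret;

    domain = iommu_domain_alloc(dev->bus);          /* -> amd_iommu_domain_alloc() */
    if (!domain)
        return -ENOMEM;

    ret = iommu_attach_device(domain, dev);         /* -> amd_iommu_attach_device() */
    if (ret)
        goto out_free;

    /* Map IOVA 0x100000 to paddr, one 4 KiB page, read/write: -> amd_iommu_map() */
    ret = iommu_map(domain, 0x100000, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
    if (ret)
        goto out_detach;

    WARN_ON(iommu_iova_to_phys(domain, 0x100000) != paddr); /* -> amd_iommu_iova_to_phys() */

    iommu_unmap(domain, 0x100000, SZ_4K);           /* -> amd_iommu_unmap() */
out_detach:
    iommu_detach_device(domain, dev);               /* -> amd_iommu_detach_device() */
out_free:
    iommu_domain_free(domain);                      /* -> amd_iommu_domain_free() */
    return ret;
}

Back to where amd_iommu_ops itself gets registered.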

The first place is the ops pointer in struct iommu_device;

the second is the iommu_ops pointer in struct bus_type. The two registration call chains are:

amd_iommu_init ->

        iommu_go_to_state ->

                state_next ->

                        amd_iommu_init_pci ->

                                iommu_init_pci ->

                                        iommu_device_register

struct iommu_device {
    struct list_head list;
    const struct iommu_ops *ops;
    struct fwnode_handle *fwnode;
    struct device *dev;
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */

struct amd_iommu {

        ...

        struct iommu_device iommu;/* Handle for IOMMU core code */

        ...

}

amd_iommu_init ->

        iommu_go_to_state ->

                state_next ->

                        amd_iommu_init_pci ->

                                amd_iommu_init_api ->

                                bus_set_iommu  // register amd_iommu_ops on the corresponding bus

struct bus_type {

        ...

        const struct iommu_ops *iommu_ops;

        ...
};
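
The bus registration is what lets the generic IOMMU core find these ops later: when a domain is allocated for a device on that bus, the core dispatches through bus->iommu_ops. A hedged, simplified sketch of that idea (example_domain_alloc_for_bus is a made-up name, and the real core also finishes initializing the domain before handing it out):

#include <linux/device.h>
#include <linux/iommu.h>

static struct iommu_domain *example_domain_alloc_for_bus(struct bus_type *bus)
{
    if (!bus || !bus->iommu_ops)
        return NULL;                    /* no IOMMU registered on this bus */

    /* On x86 with an AMD IOMMU and bus == &pci_bus_type, this is amd_iommu_domain_alloc(). */
    return bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
}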

2. Setting up struct dma_map_ops iommu_dma_ops

amd_iommu_ops.probe_finalize = amd_iommu_probe_finalize ->

        iommu_setup_dma_ops->

                dev->dma_ops = &iommu_dma_ops;   // dev is the struct device being finalized

struct device {
        ...
    const struct dma_map_ops *dma_ops; // DMA mapping operations for this device
        ...
};
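
This per-device dma_ops pointer is what the DMA mapping API dispatches on. A hedged, simplified sketch of the idea behind dma_alloc_attrs() (example_dma_alloc is a made-up name; this is illustrative, not the real implementation):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void *example_dma_alloc(struct device *dev, size_t size,
                               dma_addr_t *dma_handle, gfp_t gfp)
{
    const struct dma_map_ops *ops = dev->dma_ops;

    if (ops && ops->alloc)                       /* iommu_dma_alloc() when behind an IOMMU */
        return ops->alloc(dev, size, dma_handle, gfp, 0);

    return NULL;                                 /* the real kernel falls back to dma_direct_alloc() */
}

The full iommu_dma_ops table is: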

static const struct dma_map_ops iommu_dma_ops = {
    .alloc            = iommu_dma_alloc,
    .free            = iommu_dma_free,
    .alloc_pages        = dma_common_alloc_pages,
    .free_pages        = dma_common_free_pages,
#ifdef CONFIG_DMA_REMAP
    .alloc_noncontiguous    = iommu_dma_alloc_noncontiguous,
    .free_noncontiguous    = iommu_dma_free_noncontiguous,
#endif
    .mmap            = iommu_dma_mmap,
    .get_sgtable        = iommu_dma_get_sgtable,
    .map_page        = iommu_dma_map_page,
    .unmap_page        = iommu_dma_unmap_page,
    .map_sg            = iommu_dma_map_sg,
    .unmap_sg        = iommu_dma_unmap_sg,
    .sync_single_for_cpu    = iommu_dma_sync_single_for_cpu,
    .sync_single_for_device    = iommu_dma_sync_single_for_device,
    .sync_sg_for_cpu    = iommu_dma_sync_sg_for_cpu,
    .sync_sg_for_device    = iommu_dma_sync_sg_for_device,
    .map_resource        = iommu_dma_map_resource,
    .unmap_resource        = iommu_dma_unmap_resource,
    .get_merge_boundary    = iommu_dma_get_merge_boundary,
};

iommu_dma_alloc ->

        void *cpu_addr = iommu_dma_alloc_pages

        dma_addr_t iova = __iommu_dma_map (-> iommu_dma_alloc_iova)

3. DMA -- struct dma_map_ops and struct iommu_ops

Anyone who has written a Linux DMA driver will have come across the following functions:

Consistent

void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)

Streaming

dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction)

dma_map_page

dma_map_sg
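
A hedged usage sketch of the consistent API (alloc_desc_ring and free_desc_ring are made-up names, e.g. for a descriptor ring): behind an AMD IOMMU, the returned dma_handle is an IOVA produced by the iommu_dma_ops path, not a raw physical address. The streaming side is illustrated after its call chain below.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_desc_ring(struct device *dev, size_t size, dma_addr_t *dma_handle)
{
    /* CPU accesses go through the returned virtual address; the device is given *dma_handle. */
    return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void free_desc_ring(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle)
{
    dma_free_coherent(dev, size, cpu_addr, dma_handle);
}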

The call chain for dma_alloc_coherent is:

dma_alloc_coherent ->

        dma_alloc_attrs ->

                struct dma_map_ops *ops->alloc == iommu_dma_alloc ->

                        __iommu_dma_map ->

                                iommu_map_atomic ->

                                        _iommu_map->

                                                __iommu_map->

                                                        __iommu_map_pages->

                                                                struct iommu_ops *ops->map_pages/map

When the AMD IOMMU is in use, the iommu_ops.map callback here is amd_iommu_map.


#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

The corresponding call chain for dma_map_single is:

dma_map_single_attrs->

        dma_map_page_attrs->

                struct dma_map_ops *ops->map_page == iommu_dma_map_page ->

                        __iommu_dma_map_swiotlb ->

                                __iommu_dma_map -> 

                                        from this point on, the path is the same as for dma_alloc_coherent
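
A hedged usage sketch of the streaming API (rx_one_buffer is a made-up name): map an existing kernel buffer for a device-to-memory transfer, hand the returned IOVA to the device, and unmap when the transfer completes.

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int rx_one_buffer(struct device *dev, void *buf, size_t len)
{
    dma_addr_t dma_addr;

    dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, dma_addr))
        return -ENOMEM;

    /* ... program the device with dma_addr and wait for the transfer ... */

    /* Unmapping tears down the IOMMU mapping and makes the data visible to the CPU. */
    dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
    return 0;
}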


4.  struct io_pgtable_ops

The real work behind amd_iommu_map is done through struct io_pgtable_ops:

amd_iommu_map ->

        struct io_pgtable_ops *ops->map


struct io_pgtable_ops {
    int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
           phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
    int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
             phys_addr_t paddr, size_t pgsize, size_t pgcount,
             int prot, gfp_t gfp, size_t *mapped);
    size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
            size_t size, struct iommu_iotlb_gather *gather);
    size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
                  size_t pgsize, size_t pgcount,
                  struct iommu_iotlb_gather *gather);
    phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
                    unsigned long iova);
};

struct io_pgtable {
    enum io_pgtable_fmt    fmt;
    void            *cookie;
    struct io_pgtable_cfg    cfg;
    struct io_pgtable_ops    ops;
};

struct amd_io_pgtable {
    struct io_pgtable_cfg    pgtbl_cfg;
    struct io_pgtable    iop;
        ...
};

struct protection_domain {
        ...
    struct iommu_domain domain; /* generic domain handle used by
                       iommu core code */
    struct amd_io_pgtable iop;
        ...
};
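
The generic iommu_domain is embedded inside the driver-private protection_domain, so the AMD driver can recover its own structure from the handle the core passes in. A hedged sketch of that container_of() relationship (example_to_pdomain is a made-up name; the in-tree driver has a similar helper, and struct protection_domain is driver-internal as shown above):

#include <linux/kernel.h>
#include <linux/iommu.h>

static struct protection_domain *example_to_pdomain(struct iommu_domain *dom)
{
    /* Walk from the embedded member back to the enclosing driver structure. */
    return container_of(dom, struct protection_domain, domain);
}

The v1 page-table ops are installed as follows: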

struct amd_io_pgtable *pgtable;

    pgtable->iop.ops.map          = iommu_v1_map_page;      // maps a physical address into the DMA address space,
                                                            // allocating page-table pages as necessary
    pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
    pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
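
How the driver obtains such an ops table in the first place: it asks the io-pgtable layer for a page-table instance of its format. The sketch below is a hedged illustration (example_map_one_page is a made-up name, and the io_pgtable_cfg setup is simplified away; the map() signature matches the struct shown above):

#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static int example_map_one_page(struct io_pgtable_cfg *cfg, void *cookie)
{
    struct io_pgtable_ops *ops;

    /* Selecting the AMD v1 format wires ops->map to iommu_v1_map_page(), etc. */
    ops = alloc_io_pgtable_ops(AMD_IOMMU_V1, cfg, cookie);
    if (!ops)
        return -ENOMEM;

    /* Map IOVA 0x100000 -> physical 0x80000000, one 4 KiB page, read/write. */
    return ops->map(ops, 0x100000, 0x80000000, SZ_4K,
                    IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
}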

5. Summary

The layering of calls in the system is:

struct dma_map_ops iommu_dma_ops ->

        struct iommu_ops amd_iommu_ops ->

                struct io_pgtable_ops

The IOMMU driver sits below the DMA mapping API, so a device driver only needs to use the DMA mapping API and never has to call the IOMMU interfaces directly.

The AMD IOMMU driver implements its own struct io_pgtable_ops.

Similar implementations exist in the kernel for the ARM SMMU, Apple DART and others; see include/linux/io-pgtable.h.


Notes:

1. Devices in the same domain use the same set of mappings for address translation, i.e. the domain has its own page table.

2. default_domain vs. domain in a group: domain is the domain the group is currently attached to, while default_domain is the domain the group is supposed to be in by default.

When an attach is performed, default_domain is compared against domain to tell whether the group has already been attached to some other domain.

If the group has its own default_domain, iommu_detach_device re-attaches the group to that default_domain once the detach completes.

3. PCIe is a point-to-point protocol. If a multi-function device sits below a bridge that does not support ACS, its functions can talk to each other directly through that bridge: the traffic is forwarded by the bridge without ever reaching the Root Complex, and therefore without passing through the IOMMU. In that case the functions' IOVAs cannot be fully isolated by the IOMMU, so they must be placed in the same group, and devices in the same group share one domain.

4. Each domain represents one IOMMU address space, i.e. one page table. A group is logically bound to a domain, meaning all devices in the group live in that domain (see the sketch after this list).
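
To make notes 3 and 4 concrete, here is a hedged sketch (report_iommu_group is a made-up name) that simply reports which IOMMU group a device landed in; functions of a multi-function device behind a non-ACS bridge would report the same group ID:

#include <linux/device.h>
#include <linux/iommu.h>

static int report_iommu_group(struct device *dev)
{
    struct iommu_group *group = iommu_group_get(dev);

    if (!group)
        return -ENODEV;                 /* the device is not behind an IOMMU */

    dev_info(dev, "iommu group %d\n", iommu_group_id(group));
    iommu_group_put(group);
    return 0;
}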

Questions:

What is the difference between IOMMU_DOMAIN_UNMANAGED and IOMMU_DOMAIN_DMA?

/*
 * This are the possible domain-types
 *
 *    IOMMU_DOMAIN_BLOCKED    - All DMA is blocked, can be used to isolate
 *                  devices
 *    IOMMU_DOMAIN_IDENTITY    - DMA addresses are system physical addresses
 *    IOMMU_DOMAIN_UNMANAGED    - DMA mappings managed by IOMMU-API user, used
 *                  for VMs
 *    IOMMU_DOMAIN_DMA    - Internally used for DMA-API implementations.
 *                  This flag allows IOMMU drivers to implement
 *                  certain optimizations for these domains
 *    IOMMU_DOMAIN_DMA_FQ    - As above, but definitely using batched TLB
 *                  invalidation.
 */
