Mirror of https://github.com/RT-Thread/rt-thread.git
[DM/DMA] Update DMA #10987
* Append the write-through (WT) attribute for DMA buffer mappings.
* Change the rt_dma_pool_extract() API to take pool sizes only.
* Add an address mask for DMA controllers.
* Change the DMA lock to a mutex.
* Add a pause callback for DMA engine drivers.
* Add a DMA engine test.
* Add an ARM PL330 DMA engine driver.
components/drivers/dma/Kconfig
@@ -5,6 +5,13 @@ menuconfig RT_USING_DMA
     select RT_USING_ADT_BITMAP
     default n
 
+config RT_DMA_PL330
+    bool "ARM PL330"
+    depends on RT_USING_DMA
+    depends on RT_USING_CLK
+    depends on RT_USING_RESET
+    default n
+
 if RT_USING_DMA
     osource "$(SOC_DM_DMA_DIR)/Kconfig"
 endif
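With the new entry, enabling the PL330 back end on a DM-based target is a matter of selecting the symbols above. A minimal rtconfig.h fragment would then contain something like the following (the clock/reset symbols come from the dependencies listed in the hunk; the rest of the BSP configuration is assumed):

#define RT_USING_DMA
#define RT_USING_CLK
#define RT_USING_RESET
#define RT_DMA_PL330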
components/drivers/dma/SConscript
@@ -10,6 +10,9 @@ CPPPATH = [cwd + '/../include']
 
 src = ['dma.c', 'dma_pool.c']
 
+if GetDepend(['RT_DMA_PL330']):
+    src += ['dma-pl330.c']
+
 group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')
components/drivers/dma/dma-pl330.c (new file, 1045 lines)
File diff suppressed because it is too large.
components/drivers/dma/dma.c
@@ -19,6 +19,22 @@
 static rt_list_t dmac_nodes = RT_LIST_OBJECT_INIT(dmac_nodes);
 static RT_DEFINE_SPINLOCK(dmac_nodes_lock);
 
+static void dma_lock(struct rt_dma_controller *ctrl)
+{
+    if (rt_thread_self())
+    {
+        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    }
+}
+
+static void dma_unlock(struct rt_dma_controller *ctrl)
+{
+    if (rt_thread_self())
+    {
+        rt_mutex_release(&ctrl->mutex);
+    }
+}
+
 rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl)
 {
     const char *dev_name;
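These two helpers funnel every rt_mutex_take()/rt_mutex_release() pair in dma.c through one place; the rt_thread_self() guard presumably keeps the API callable while the scheduler is not yet running (for example, a controller registering itself from an early init hook), where blocking on a mutex would not be allowed. A minimal sketch of such an early-registration path, with all demo_* names being hypothetical:

#include <rtthread.h>
#include <rtdevice.h>

/* Hypothetical early-init hook: if this runs before the scheduler starts,
 * rt_thread_self() is still RT_NULL, so dma_lock()/dma_unlock() inside the
 * DMA core fall through instead of blocking on ctrl->mutex. */
static int demo_dmac_early_register(void)
{
    /* Assumed to be a fully populated rt_dma_controller defined elsewhere. */
    extern struct rt_dma_controller demo_dmac;

    return (int)rt_dma_controller_register(&demo_dmac);
}
INIT_BOARD_EXPORT(demo_dmac_early_register);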
@@ -64,11 +80,11 @@ rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
         return -RT_EINVAL;
     }
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     if (!rt_list_isempty(&ctrl->channels_nodes))
     {
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
         return -RT_EBUSY;
     }
 
@@ -77,7 +93,7 @@ rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
         rt_dm_dev_unbind_fwdata(ctrl->dev, RT_NULL);
     }
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
     rt_mutex_detach(&ctrl->mutex);
 
     rt_spin_lock(&dmac_nodes_lock);
@@ -106,11 +122,45 @@ rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan)
 
     ctrl = chan->ctrl;
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     err = ctrl->ops->start(chan);
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     return err;
 }
+
+rt_err_t rt_dma_chan_pause(struct rt_dma_chan *chan)
+{
+    rt_err_t err;
+    struct rt_dma_controller *ctrl;
+
+    if (!chan)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (!chan->ctrl->ops->pause)
+    {
+        LOG_D("%s: No pause, try stop", rt_dm_dev_get_name(chan->ctrl->dev));
+        return rt_dma_chan_stop(chan);
+    }
+
+    if (chan->prep_err)
+    {
+        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));
+
+        return chan->prep_err;
+    }
+
+    ctrl = chan->ctrl;
+
+    dma_lock(ctrl);
+
+    err = ctrl->ops->pause(chan);
+
+    dma_unlock(ctrl);
+
+    return err;
+}
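Because rt_dma_chan_pause() transparently falls back to rt_dma_chan_stop() when the controller provides no pause op, callers do not need to probe for the capability themselves. A minimal usage sketch follows; the pause/resume scenario and the assumption that a later rt_dma_chan_start() resumes the transfer are illustrative, not guaranteed by every controller:

#include <rtthread.h>
#include <rtdevice.h>

/* Hypothetical helper: quiesce a running channel, do some work, kick it again. */
static rt_err_t demo_dma_pause_resume(struct rt_dma_chan *chan)
{
    rt_err_t err = rt_dma_chan_pause(chan);

    if (err)
    {
        return err;
    }

    /* ... service the peripheral while the channel is quiescent ... */

    /* Whether this resumes or restarts the transfer is controller specific. */
    return rt_dma_chan_start(chan);
}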
@@ -134,11 +184,11 @@ rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan)
 
     ctrl = chan->ctrl;
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     err = ctrl->ops->stop(chan);
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     return err;
 }
@@ -188,11 +238,11 @@ rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
         goto _end;
     }
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
 
     err = ctrl->ops->config(chan, conf);
 
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     if (!err)
     {
@@ -233,6 +283,19 @@ static rt_bool_t range_is_illegal(const char *name, const char *desc,
     return illegal;
 }
 
+static rt_bool_t addr_is_supported(const char *name, const char *desc,
+        rt_uint64_t mask, rt_ubase_t addr)
+{
+    rt_bool_t illegal = !!(addr & ~mask);
+
+    if (illegal)
+    {
+        LOG_E("%s: %s %p is out of mask %p", name, desc, addr, mask);
+    }
+
+    return illegal;
+}
+
 rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
         struct rt_dma_slave_transfer *transfer)
 {
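The helper treats an address as unsupported as soon as any bit is set above the controller's mask. Restated as a stand-alone predicate with a worked example (the demo_* name is illustrative only):

#include <rtthread.h>

/* Equivalent of the check inside addr_is_supported(): an address fits only
 * if it has no bits above the controller's addr_mask. */
rt_inline rt_bool_t demo_dma_addr_fits(rt_uint64_t mask, rt_uint64_t addr)
{
    return (addr & ~mask) == 0;
}

/* With mask = RT_DMA_ADDR_MASK(32) = 0xffffffff:
 *   demo_dma_addr_fits(mask, 0x00000000fffff000ULL) -> RT_TRUE
 *   demo_dma_addr_fits(mask, 0x0000000100000000ULL) -> RT_FALSE
 * In the prep_* paths below, the failing case is reported as -RT_ENOSYS. */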
@@ -262,6 +325,18 @@ rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
     dma_addr_dst = transfer->dst_addr;
     len = transfer->buffer_len;
 
+    if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
+            ctrl->addr_mask, conf->src_addr))
+    {
+        return -RT_ENOSYS;
+    }
+
+    if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
+            ctrl->addr_mask, conf->dst_addr))
+    {
+        return -RT_ENOSYS;
+    }
+
     if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
             dma_addr_src, conf->src_addr))
     {
@@ -276,11 +351,11 @@ rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
 
     if (ctrl->ops->prep_memcpy)
     {
-        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+        dma_lock(ctrl);
 
         err = ctrl->ops->prep_memcpy(chan, dma_addr_src, dma_addr_dst, len);
 
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
     }
     else
     {
@@ -327,6 +402,12 @@ rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->src_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
+                ctrl->addr_mask, conf->src_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
                 dma_buf_addr, conf->src_addr))
         {
@@ -337,6 +418,12 @@ rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->dst_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
+                ctrl->addr_mask, conf->dst_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
                 dma_buf_addr, conf->dst_addr))
         {
@@ -350,12 +437,12 @@ rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
 
     if (ctrl->ops->prep_cyclic)
     {
-        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+        dma_lock(ctrl);
 
         err = ctrl->ops->prep_cyclic(chan, dma_buf_addr,
                 transfer->buffer_len, transfer->period_len, dir);
 
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
     }
     else
     {
@@ -402,6 +489,12 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->src_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
+                ctrl->addr_mask, conf->src_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
                 dma_buf_addr, conf->src_addr))
         {
@@ -412,6 +505,12 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
     {
         dma_buf_addr = transfer->dst_addr;
 
+        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
+                ctrl->addr_mask, conf->dst_addr))
+        {
+            return -RT_ENOSYS;
+        }
+
         if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
                 dma_buf_addr, conf->dst_addr))
         {
@@ -425,12 +524,12 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
 
     if (ctrl->ops->prep_single)
     {
-        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+        dma_lock(ctrl);
 
         err = ctrl->ops->prep_single(chan, dma_buf_addr,
                 transfer->buffer_len, dir);
 
-        rt_mutex_release(&ctrl->mutex);
+        dma_unlock(ctrl);
     }
     else
     {
@@ -556,9 +655,9 @@ struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
     chan->conf_err = -RT_ERROR;
     chan->prep_err = -RT_ERROR;
 
-    rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
+    dma_lock(ctrl);
     rt_list_insert_before(&ctrl->channels_nodes, &chan->list);
-    rt_mutex_release(&ctrl->mutex);
+    dma_unlock(ctrl);
 
     return chan;
 }
 
components/drivers/dma/dma_pool.c
@@ -17,6 +17,7 @@
 #include <rtdbg.h>
 
 #include <mm_aspace.h>
+#include <mm_memblock.h>
 #include <dt-bindings/size.h>
 
 static RT_DEFINE_SPINLOCK(dma_pools_lock);
@@ -291,9 +292,13 @@ static rt_ubase_t dma_pool_alloc(struct rt_dma_pool *pool, rt_size_t size)
                 rt_bitmap_set_bit(pool->map, next_bit);
             }
 
+            LOG_D("%s offset = %p, pages = %d", "Alloc",
+                    pool->start + bit * ARCH_PAGE_SIZE, size);
+
             return pool->start + bit * ARCH_PAGE_SIZE;
         }
 _next:
         ;
     }
 
     return RT_NULL;
@@ -310,6 +315,8 @@ static void dma_pool_free(struct rt_dma_pool *pool, rt_ubase_t offset, rt_size_t
     {
         rt_bitmap_clear_bit(pool->map, bit);
     }
+
+    LOG_D("%s offset = %p, pages = %d", "Free", offset, size);
 }
 
 static void *dma_alloc(struct rt_device *dev, rt_size_t size,
@@ -344,11 +351,6 @@ static void *dma_alloc(struct rt_device *dev, rt_size_t size,
             continue;
         }
 
-        if ((flags & RT_DMA_F_LINEAR) && !((pool->flags & RT_DMA_F_LINEAR)))
-        {
-            continue;
-        }
-
         *dma_handle = dma_pool_alloc(pool, size);
 
         if (*dma_handle && !(flags & RT_DMA_F_NOMAP))
@@ -357,6 +359,10 @@ static void *dma_alloc(struct rt_device *dev, rt_size_t size,
         {
             dma_buffer = rt_ioremap_nocache((void *)*dma_handle, size);
         }
+        else if (flags & RT_DMA_F_WT)
+        {
+            dma_buffer = rt_ioremap_wt((void *)*dma_handle, size);
+        }
         else
         {
             dma_buffer = rt_ioremap_cached((void *)*dma_handle, size);
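The new branch slots a write-through mapping between the fully uncached and the cached cases: with RT_DMA_F_WT, CPU reads may still hit the cache while stores are pushed through to memory, which suits buffers that the device only reads (descriptor rings, frame buffers). A driver that manages its own backing storage could use the same mapping primitive directly; a minimal sketch, where the region is assumed to have been reserved elsewhere and the function name is hypothetical:

/* Hypothetical: map an already-reserved physical region write-through for a
 * device-readable descriptor ring.  rt_ioremap_wt() is the same primitive
 * used by dma_alloc() above. */
static void *demo_map_desc_ring(rt_ubase_t phys, rt_size_t size)
{
    return rt_ioremap_wt((void *)phys, size);
}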
@@ -584,20 +590,33 @@ struct rt_dma_pool *rt_dma_pool_install(rt_region_t *region)
     return pool;
 }
 
-rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
-        rt_size_t cma_size, rt_size_t coherent_pool_size)
+rt_err_t rt_dma_pool_extract(rt_size_t cma_size, rt_size_t coherent_pool_size)
 {
     struct rt_dma_pool *pool;
-    rt_region_t *region = region_list, *region_high = RT_NULL, cma, coherent_pool;
+    struct rt_mmblk_reg *reg, *reg_high;
+    struct rt_memblock *memblock = rt_memblock_get_reserved();
+    rt_region_t *region, *region_high = RT_NULL, cma, coherent_pool;
 
-    if (!region_list || !list_len || cma_size < coherent_pool_size)
+    if (!memblock)
     {
         return -RT_ENOSYS;
     }
 
+    /* Coherent pool is included in CMA */
+    if (cma_size < coherent_pool_size)
+    {
+        return -RT_EINVAL;
+    }
+
-    for (rt_size_t i = 0; i < list_len; ++i, ++region)
+    rt_slist_for_each_entry(reg, &memblock->reg_list, node)
     {
-        if (!region->name)
+        if (!reg->alloc || (reg->flags & MEMBLOCK_HOTPLUG))
         {
             continue;
         }
 
+        region = &reg->memreg;
+        if (rt_strcmp(region->name, "dma-pool") || !reg->memreg.name)
         {
             continue;
         }
@@ -608,6 +627,7 @@ rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
         if ((rt_ssize_t)((4UL * SIZE_GB) - region->start) < cma_size)
         {
             region_high = region;
+            reg_high = reg;
             continue;
         }
 
@@ -618,6 +638,7 @@ rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
     if (region_high)
     {
         region = region_high;
+        reg = reg_high;
         LOG_W("No available DMA zone in 4G");
 
         goto _found;
@@ -630,9 +651,6 @@ _found:
     {
         cma.start = region->start;
         cma.end = cma.start + cma_size;
-
-        /* Update input region */
-        region->start += cma_size;
     }
     else
     {
@@ -657,6 +675,8 @@ _found:
         return -RT_ENOMEM;
     }
 
+    reg->alloc = RT_FALSE;
+
     return RT_EOK;
 }
components/drivers/include/drivers/dma.h
@@ -81,6 +81,8 @@ struct rt_dma_controller
 
     struct rt_device *dev;
 
+#define RT_DMA_ADDR_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
+    rt_uint64_t addr_mask;
     RT_BITMAP_DECLARE(dir_cap, RT_DMA_DIR_MAX);
     const struct rt_dma_controller_ops *ops;
 
@@ -95,6 +97,7 @@ struct rt_dma_controller_ops
     rt_err_t (*release_chan)(struct rt_dma_chan *chan);
 
     rt_err_t (*start)(struct rt_dma_chan *chan);
+    rt_err_t (*pause)(struct rt_dma_chan *chan);
     rt_err_t (*stop)(struct rt_dma_chan *chan);
     rt_err_t (*config)(struct rt_dma_chan *chan, struct rt_dma_slave_config *conf);
 
@@ -164,10 +167,19 @@ rt_inline void rt_dma_controller_add_direction(struct rt_dma_controller *ctrl,
     rt_bitmap_set_bit(ctrl->dir_cap, dir);
 }
 
+rt_inline void rt_dma_controller_set_addr_mask(struct rt_dma_controller *ctrl,
+        rt_uint64_t mask)
+{
+    RT_ASSERT(ctrl != RT_NULL);
+
+    ctrl->addr_mask = mask;
+}
+
 rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl);
 rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl);
 
 rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan);
+rt_err_t rt_dma_chan_pause(struct rt_dma_chan *chan);
 rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan);
 rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
         struct rt_dma_slave_config *conf);
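From the controller-driver side, the address mask and the new pause hook are advertised at registration time. A rough probe-time sketch; the demo_* names and the surrounding driver structure are hypothetical, only the rt_dma_* calls come from this header:

#include <rtthread.h>
#include <rtdevice.h>

/* Hypothetical controller instance wrapping the generic rt_dma_controller. */
struct demo_dmac
{
    struct rt_dma_controller parent;
    /* ... registers, channels ... */
};

static rt_err_t demo_dmac_setup(struct demo_dmac *dmac,
        struct rt_device *dev, const struct rt_dma_controller_ops *ops)
{
    dmac->parent.dev = dev;
    dmac->parent.ops = ops;     /* ops->pause may be RT_NULL; see dma.c above */

    /* A 32-bit engine such as the PL330 would advertise a 32-bit limit. */
    rt_dma_controller_set_addr_mask(&dmac->parent, RT_DMA_ADDR_MASK(32));
    rt_dma_controller_add_direction(&dmac->parent, RT_DMA_MEM_TO_MEM);

    return rt_dma_controller_register(&dmac->parent);
}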
@@ -188,6 +200,7 @@ rt_err_t rt_dma_chan_release(struct rt_dma_chan *chan);
 #define RT_DMA_F_NOCACHE RT_BIT(2)
 #define RT_DMA_F_DEVICE RT_BIT(3)
 #define RT_DMA_F_NOMAP RT_BIT(4)
+#define RT_DMA_F_WT RT_BIT(5)
 
 #define RT_DMA_PAGE_SIZE ARCH_PAGE_SIZE
 
@@ -228,7 +241,6 @@ rt_inline void rt_dma_device_set_ops(struct rt_device *dev,
 }
 
 struct rt_dma_pool *rt_dma_pool_install(rt_region_t *region);
-rt_err_t rt_dma_pool_extract(rt_region_t *region_list, rt_size_t list_len,
-        rt_size_t cma_size, rt_size_t coherent_pool_size);
+rt_err_t rt_dma_pool_extract(rt_size_t cma_size, rt_size_t coherent_pool_size);
 
 #endif /* __DMA_H__ */
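With the prototype change, callers no longer pass a region list; the reserved regions are discovered through memblock (see the dma_pool.c hunks above) and the caller only supplies the CMA and coherent-pool budgets. A hypothetical board bring-up call, with sizes purely illustrative and SIZE_MB assumed to come from dt-bindings/size.h alongside the SIZE_GB used earlier:

#include <rtthread.h>
#include <rtdevice.h>
#include <dt-bindings/size.h>

/* Hypothetical: carve 16 MiB of CMA out of the reserved "dma-pool" region,
 * keeping 2 MiB of it as the coherent pool. */
static int demo_dma_pool_setup(void)
{
    return (int)rt_dma_pool_extract(16 * SIZE_MB, 2 * SIZE_MB);
}
INIT_PREV_EXPORT(demo_dma_pool_setup);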
examples/test/dma_test.c (new file, 146 lines)
@@ -0,0 +1,146 @@
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#if defined(RT_USING_DMA) && defined(RT_USING_FINSH)
#include <stdlib.h>

static void test_dma_callback(struct rt_dma_chan *chan, rt_size_t size)
{
    rt_bool_t *done_ptr = chan->priv;

    *done_ptr = RT_TRUE;
    rt_hw_wmb();
}

static int dma_memcpy_test(int argc, char**argv)
{
    rt_bool_t done;
    int dma_sz = 64;
    rt_ubase_t dma_addr;
    char *src_addr, *dst_addr;
    struct rt_device dev = {};
    struct rt_dma_slave_config config;
    struct rt_dma_slave_transfer transfer;
    struct rt_dma_chan *chn = rt_dma_chan_request(&dev, RT_NULL);

    if (rt_is_err_or_null(chn))
    {
        rt_kputs("Alloc DMA channel fail");
        return 0;
    }

    if (argc > 1)
    {
        dma_sz = atoi(argv[1]);
    }

    if (dma_sz % sizeof(rt_uint32_t))
    {
        dma_sz = RT_ALIGN_DOWN(dma_sz, sizeof(rt_uint32_t));
        rt_kprintf("DMA size align to %d\n", dma_sz);
    }

    if (!(src_addr = rt_dma_alloc_coherent(&dev, dma_sz, &dma_addr)))
    {
        rt_kprintf("Alloc DMA %s buffer(size = %d) fail\n", "SRC", dma_sz);
        goto _free_dma_chan;
    }
    config.src_addr = dma_addr;

    if (!(dst_addr = rt_dma_alloc_coherent(&dev, dma_sz, &dma_addr)))
    {
        rt_kprintf("Alloc DMA %s buffer(size = %d) fail\n", "DST", dma_sz);
        goto _free_src_addr;
    }
    config.dst_addr = dma_addr;

    config.direction = RT_DMA_MEM_TO_MEM;
    config.src_addr_width = sizeof(rt_uint32_t);
    config.src_maxburst = sizeof(rt_uint32_t);
    config.dst_addr_width = sizeof(rt_uint32_t);
    config.dst_maxburst = sizeof(rt_uint32_t);

    chn->callback = test_dma_callback;
    chn->priv = &done;
    if (rt_dma_chan_config(chn, &config))
    {
        rt_kprintf("DMA channel %s fail\n", "config");
        goto _free_dst_addr;
    }

    rt_memset(&transfer, 0, sizeof(transfer));
    transfer.src_addr = config.src_addr;
    transfer.dst_addr = config.dst_addr;
    transfer.buffer_len = dma_sz;

    if (rt_dma_prep_memcpy(chn, &transfer))
    {
        rt_kprintf("DMA channel %s fail\n", "prep");
        goto _free_dst_addr;
    }

    rt_memset(src_addr, 0xff, dma_sz);
    rt_memset(dst_addr, 0, dma_sz);

    rt_kprintf("%s %s:\n", "SRC", "start");
    for (int i = 0; i < dma_sz; ++i)
    {
        rt_kprintf("%02x ", src_addr[i]);
    }
    rt_kputs("\n");

    rt_kprintf("%s %s:\n", "DST", "start");
    for (int i = 0; i < dma_sz; ++i)
    {
        rt_kprintf("%02x ", dst_addr[i]);
    }
    rt_kputs("\n");

    done = RT_FALSE;
    if (rt_dma_chan_start(chn))
    {
        rt_kprintf("DMA channel %s fail\n", "start");
        goto _free_dst_addr;
    }

    while (!done)
    {
        rt_hw_cpu_relax();
    }

    rt_kprintf("%s %s:\n", "SRC", "end");
    for (int i = 0; i < dma_sz; ++i)
    {
        rt_kprintf("%02x ", src_addr[i]);
    }
    rt_kputs("\n");

    rt_kprintf("%s %s:\n", "DST", "end");
    for (int i = 0; i < dma_sz; ++i)
    {
        rt_kprintf("%02x ", dst_addr[i]);
    }
    rt_kputs("\n");

_free_dst_addr:
    rt_dma_free_coherent(&dev, dma_sz, dst_addr, config.dst_addr);
_free_src_addr:
    rt_dma_free_coherent(&dev, dma_sz, src_addr, config.src_addr);
_free_dma_chan:
    rt_dma_chan_release(chn);

    return 0;
}
MSH_CMD_EXPORT(dma_memcpy_test, test dma memcpy e.g: dma_memcpy_test(64));
#endif /* RT_USING_DMA && RT_USING_FINSH */
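Once RT_USING_DMA and a controller driver are enabled, the test is run from the msh console as "dma_memcpy_test 64", where the argument is the transfer size in bytes (rounded down to a multiple of 4); the SRC/DST dumps printed before and after the transfer should match once the completion callback fires.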