Diffstat (limited to 'drivers')
 drivers/dma/dmaengine.c   |  55
 drivers/dma/dmatest.c     | 182
 drivers/dma/ioat/dma_v3.c |  26
 drivers/dma/mv_xor.c      |  53
 drivers/dma/mv_xor.h      |  28
 5 files changed, 115 insertions(+), 229 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db5948b..eee16b01fa89 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@ void dma_issue_pending_all(void)
EXPORT_SYMBOL(dma_issue_pending_all);
/**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as the cpu
* @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
*
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
*/
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
struct dma_device *device;
struct dma_chan *chan;
- struct dma_chan *ret = NULL;
struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
list_for_each_entry(device, &dma_device_list, global_node) {
if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
list_for_each_entry(chan, &device->channels, device_node) {
if (!chan->client_count)
continue;
- if (!min)
- min = chan;
- else if (chan->table_count < min->table_count)
+ if (!min || chan->table_count < min->table_count)
min = chan;
- if (n-- == 0) {
- ret = chan;
- break; /* done */
- }
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
}
- if (ret)
- break; /* done */
}
- if (!ret)
- ret = min;
+ chan = localmin ? localmin : min;
- if (ret)
- ret->table_count++;
+ if (chan)
+ chan->table_count++;
- return ret;
+ return chan;
}
/**
@@ -441,7 +446,6 @@ static void dma_channel_rebalance(void)
struct dma_device *device;
int cpu;
int cap;
- int n;
/* undo the last distribution */
for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@ static void dma_channel_rebalance(void)
return;
/* redistribute available channels */
- n = 0;
for_each_dma_cap_mask(cap, dma_cap_mask_all)
for_each_online_cpu(cpu) {
- if (num_possible_cpus() > 1)
- chan = nth_chan(cap, n++);
- else
- chan = nth_chan(cap, -1);
-
+ chan = min_chan(cap, cpu);
per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
}
}
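
The selection policy is easiest to see outside the kernel. The sketch below models what min_chan() now computes for each online CPU: the least-referenced channel on the CPU's own NUMA node when one exists, otherwise the least-referenced channel overall. struct chan, pick_chan() and the sample topology are hypothetical user-space stand-ins for struct dma_chan, min_chan() and a real machine; only the selection logic mirrors the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct chan {
	const char *name;
	int node;		/* NUMA node of the channel's device, -1 = none */
	int table_count;	/* CPUs already pointed at this channel */
};

/* Mirrors dma_chan_is_local(): a device with no node info is always local. */
static bool chan_is_local(const struct chan *c, int cpu_node)
{
	return c->node == -1 || c->node == cpu_node;
}

/* Mirrors min_chan(): prefer the least-used local channel, else fall back
 * to the least-used channel overall. */
static const struct chan *pick_chan(const struct chan *chans, size_t n,
				    int cpu_node)
{
	const struct chan *min = NULL, *localmin = NULL;

	for (size_t i = 0; i < n; i++) {
		const struct chan *c = &chans[i];

		if (!min || c->table_count < min->table_count)
			min = c;
		if (chan_is_local(c, cpu_node) &&
		    (!localmin || c->table_count < localmin->table_count))
			localmin = c;
	}
	return localmin ? localmin : min;
}

int main(void)
{
	const struct chan chans[] = {
		{ "dma0chan0", 0, 2 },
		{ "dma1chan0", 1, 0 },	/* globally least used, but remote */
		{ "dma0chan1", 0, 1 },	/* least used on node 0 */
	};

	/* A CPU on node 0 gets dma0chan1, not the remote dma1chan0. */
	printf("%s\n", pick_chan(chans, 3, 0)->name);
	return 0;
}
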
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e88ded2c8d2f..92f796cdc6ab 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -25,44 +25,46 @@
#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+module_param_string(device, test_device, sizeof(test_device),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO);
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
@@ -193,7 +195,6 @@ struct dmatest_info {
/* debugfs related stuff */
struct dentry *root;
- struct dmatest_params dbgfs_params;
/* Test results */
struct list_head results;
@@ -406,7 +407,11 @@ static int thread_result_add(struct dmatest_info *info,
list_add_tail(&tr->node, &r->results);
mutex_unlock(&info->results_lock);
- pr_warn("%s\n", thread_result_get(r->name, tr));
+ if (tr->type == DMATEST_ET_OK)
+ pr_debug("%s\n", thread_result_get(r->name, tr));
+ else
+ pr_warn("%s\n", thread_result_get(r->name, tr));
+
return 0;
}
@@ -1007,7 +1012,15 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
result_free(info, NULL);
/* Copy test parameters */
- memcpy(params, &info->dbgfs_params, sizeof(*params));
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strlcpy(params->device, strim(test_device), sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
/* Run test with new parameters */
return __run_threaded_test(info);
@@ -1029,71 +1042,6 @@ static bool __is_threaded_test_run(struct dmatest_info *info)
return false;
}
-static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
- const void __user *from, size_t count)
-{
- char tmp[20];
- ssize_t len;
-
- len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
- if (len >= 0) {
- tmp[len] = '\0';
- strlcpy(to, strim(tmp), available);
- }
-
- return len;
-}
-
-static ssize_t dtf_read_channel(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.channel,
- strlen(info->dbgfs_params.channel));
-}
-
-static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.channel,
- sizeof(info->dbgfs_params.channel),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_channel_fops = {
- .read = dtf_read_channel,
- .write = dtf_write_channel,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static ssize_t dtf_read_device(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.device,
- strlen(info->dbgfs_params.device));
-}
-
-static ssize_t dtf_write_device(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.device,
- sizeof(info->dbgfs_params.device),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_device_fops = {
- .read = dtf_read_device,
- .write = dtf_write_device,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1187,8 +1135,6 @@ static const struct file_operations dtf_results_fops = {
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
struct dentry *d;
- struct dmatest_params *params = &info->dbgfs_params;
- int ret = -ENOMEM;
d = debugfs_create_dir("dmatest", NULL);
if (IS_ERR(d))
@@ -1198,81 +1144,24 @@ static int dmatest_register_dbgfs(struct dmatest_info *info)
info->root = d;
- /* Copy initial values */
- memcpy(params, &info->params, sizeof(*params));
-
- /* Test parameters */
-
- d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->buf_size);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_channel_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_device_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->threads_per_chan);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->max_channels);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->iterations);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->xor_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->pq_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->timeout);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
/* Run or stop threaded test */
- d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
- info, &dtf_run_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
+ &dtf_run_fops);
/* Results of test in progress */
- d = debugfs_create_file("results", S_IRUGO, info->root, info,
- &dtf_results_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("results", S_IRUGO, info->root, info,
+ &dtf_results_fops);
return 0;
-err_node:
- debugfs_remove_recursive(info->root);
err_root:
pr_err("dmatest: Failed to initialize debugfs\n");
- return ret;
+ return -ENOMEM;
}
static int __init dmatest_init(void)
{
struct dmatest_info *info = &test_info;
- struct dmatest_params *params = &info->params;
int ret;
memset(info, 0, sizeof(*info));
@@ -1283,17 +1172,6 @@ static int __init dmatest_init(void)
mutex_init(&info->results_lock);
INIT_LIST_HEAD(&info->results);
- /* Set default parameters */
- params->buf_size = test_buf_size;
- strlcpy(params->channel, test_channel, sizeof(params->channel));
- strlcpy(params->device, test_device, sizeof(params->device));
- params->threads_per_chan = threads_per_chan;
- params->max_channels = max_channels;
- params->iterations = iterations;
- params->xor_sources = xor_sources;
- params->pq_sources = pq_sources;
- params->timeout = timeout;
-
ret = dmatest_register_dbgfs(info);
if (ret)
return ret;
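
Two dmatest.c changes work together here: every module parameter gained S_IWUSR, making it writable by root under /sys/module/dmatest/parameters/, and __restart_threaded_test() now snapshots the module parameters into the live dmatest_params, replacing the separate debugfs copies that the removed dtf_* handlers maintained. A parameter written at runtime therefore takes effect the next time the test is restarted through the debugfs "run" file. A toy module illustrating the same pattern (module name and parameter are made up for illustration):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Readable by all, writable by root via
 * /sys/module/param_demo/parameters/buf_size. */
static unsigned int buf_size = 16384;
module_param(buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_size, "Size of the test buffer");

static int __init param_demo_init(void)
{
	/* Like dmatest, sample the parameter when work starts, not only
	 * at load time, so runtime writes are honoured on the next run. */
	pr_info("param_demo: buf_size=%u\n", buf_size);
	return 0;
}

static void __exit param_demo_exit(void)
{
}

module_init(param_demo_init);
module_exit(param_demo_exit);
MODULE_LICENSE("GPL");
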
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e035579b..d8ececaf1b57 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -251,7 +251,7 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
- dma_addr_t addr, u32 offset, u8 coef, int idx)
+ dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
struct ioat_pq16a_descriptor *pq16 =
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_xeon_cb32(pdev))
- dma->copy_align = 6;
-
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
- if (is_bwd_noraid(pdev))
+ if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma_set_maxpq(dma, 16, 0);
- dma->pq_align = 0;
} else {
dma_set_maxpq(dma, 8, 0);
- if (is_xeon_cb32(pdev))
- dma->pq_align = 6;
- else
- dma->pq_align = 0;
}
if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma->max_xor = 16;
- dma->xor_align = 0;
} else {
dma->max_xor = 8;
- if (is_xeon_cb32(pdev))
- dma->xor_align = 6;
- else
- dma->xor_align = 0;
}
}
}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
- if (is_xeon_cb32(pdev)) {
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
-
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- }
-
/* starting with CB3.3 super extended descriptors are supported */
if (device->cap & IOAT_CAP_RAID16SS) {
char pool_name[14];
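
The ioat hunks make two related moves: Xeon CB3.2 (is_xeon_cb32()) is folded into the capability mask, so XOR/PQ/RAID support is cleared once up front rather than patched out piecemeal, and the copy_align/xor_align/pq_align assignments are dropped, leaving the fields at zero. Zero means "no alignment restriction" to the gate dmaengine clients apply (is_dma_copy_aligned() and friends in include/linux/dmaengine.h); below is a condensed user-space restatement of that check, a sketch rather than the kernel's exact code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* shift is the device's copy_align/xor_align/pq_align value, i.e. log2 of
 * the required alignment; 0 (what these hunks leave behind) accepts any
 * combination of offsets and length. */
static bool check_align(unsigned int shift, size_t off1, size_t off2,
			size_t len)
{
	size_t mask = shift ? (((size_t)1 << shift) - 1) : 0;

	return ((off1 | off2 | len) & mask) == 0;
}

int main(void)
{
	/* The old CB3.2 xor_align of 6 demanded 64-byte alignment... */
	printf("%d\n", check_align(6, 8, 0, 4096));	/* 0: rejected */
	/* ...with the field now 0, the same transfer qualifies. */
	printf("%d\n", check_align(0, 8, 0, 4096));	/* 1: accepted */
	return 0;
}
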
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3c9a44..0ec086d2b6a0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -64,7 +64,7 @@ static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
int src_idx)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[src_idx];
+ return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}
@@ -107,32 +107,32 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->phy_src_addr[index] = addr;
+ hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
if (desc->type == DMA_XOR)
hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
- return __raw_readl(XOR_CURR_DESC(chan));
+ return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
u32 next_desc_addr)
{
- __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+ writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
- u32 val = __raw_readl(XOR_INTR_MASK(chan));
+ u32 val = readl_relaxed(XOR_INTR_MASK(chan));
val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
- __raw_writel(val, XOR_INTR_MASK(chan));
+ writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
- u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+ u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
return intr_cause;
}
@@ -149,13 +149,13 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
u32 val = 0xFFFF0000 >> (chan->idx * 16);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
u32 op_mode;
- u32 config = __raw_readl(XOR_CONFIG(chan));
+ u32 config = readl_relaxed(XOR_CONFIG(chan));
switch (type) {
case DMA_XOR:
@@ -192,7 +192,14 @@ static void mv_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
- __raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+ writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
}
@@ -201,14 +208,14 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
u32 activation;
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
+ activation = readl_relaxed(XOR_ACTIVATION(chan));
activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+ writel_relaxed(activation, XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
- u32 state = __raw_readl(XOR_ACTIVATION(chan));
+ u32 state = readl_relaxed(XOR_ACTIVATION(chan));
state = (state >> 4) & 0x3;
@@ -755,22 +762,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
u32 val;
- val = __raw_readl(XOR_CONFIG(chan));
+ val = readl_relaxed(XOR_CONFIG(chan));
dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
- val = __raw_readl(XOR_ACTIVATION(chan));
+ val = readl_relaxed(XOR_ACTIVATION(chan));
dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_CAUSE(chan));
+ val = readl_relaxed(XOR_INTR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_MASK(chan));
+ val = readl_relaxed(XOR_INTR_MASK(chan));
dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_CAUSE(chan));
+ val = readl_relaxed(XOR_ERROR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_ADDR(chan));
+ val = readl_relaxed(XOR_ERROR_ADDR(chan));
dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
@@ -1029,10 +1036,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
struct dma_device *dma_dev;
mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ if (!mv_chan)
+ return ERR_PTR(-ENOMEM);
mv_chan->idx = idx;
mv_chan->irq = irq;
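
The wholesale __raw_readl()/__raw_writel() to readl_relaxed()/writel_relaxed() conversion is what makes the mv_xor register accesses endian-clean: the __raw_* accessors move data in native CPU byte order, while the *_relaxed variants perform little-endian accesses (still without the barrier that plain readl()/writel() add), matching how the XOR engine's registers are wired. A user-space model of the difference; raw_readl() and le_readl() are stand-ins for the kernel accessors:

#include <stdint.h>
#include <stdio.h>

/* Native-endian load, as __raw_readl() does: no byte swap. */
static uint32_t raw_readl(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* Little-endian load, as readl_relaxed() is defined to do. */
static uint32_t le_readl(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* Model a little-endian device register holding the value 1. */
	uint8_t reg[4] = { 0x01, 0x00, 0x00, 0x00 };

	printf("le_readl  = 0x%08x (correct on any host)\n", le_readl(reg));
	printf("raw_readl = 0x%08x (byte-swapped on big-endian hosts)\n",
	       raw_readl(reg));
	return 0;
}
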
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c619359cb7fe..06b067f24c9b 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -29,8 +29,10 @@
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2
+/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
+#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -143,7 +145,16 @@ struct mv_xor_desc_slot {
#endif
};
-/* This structure describes XOR descriptor size 64bytes */
+/*
+ * This structure describes the 64-byte hardware XOR descriptor. The
+ * mv_phy_src_idx() macro must be used when indexing the values of the
+ * phy_src_addr[] array. This is due to the fact that the 'descriptor
+ * swap' feature, used on big endian systems, swaps descriptors data
+ * within blocks of 8 bytes. So two consecutive values of the
+ * phy_src_addr[] array are actually swapped in big-endian, which
+ * explains the different mv_phy_src_idx() implementation.
+ */
+#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
u32 status; /* descriptor execution status */
u32 crc32_result; /* result of CRC-32 calculation */
@@ -155,6 +166,21 @@ struct mv_xor_desc {
u32 reserved0;
u32 reserved1;
};
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+ u32 crc32_result; /* result of CRC-32 calculation */
+ u32 status; /* descriptor execution status */
+ u32 phy_next_desc; /* next descriptor address pointer */
+ u32 desc_command; /* type of operation to be carried out */
+ u32 phy_dest_addr; /* destination block address */
+ u32 byte_count; /* size of src/dst blocks in bytes */
+ u32 phy_src_addr[8]; /* source block addresses */
+ u32 reserved1;
+ u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
#define to_mv_sw_desc(addr_hw_desc) \
container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
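
The index mapping behind mv_phy_src_idx() follows directly from the comment above: with XOR_DESCRIPTOR_SWAP enabled (see the mv_set_mode() hunk), the engine swaps data within each 8-byte block of the descriptor, so adjacent pairs of 32-bit phy_src_addr[] slots trade places and XOR-ing the index with 1 recovers the slot the hardware actually reads. A small sketch printing the resulting mapping:

#include <stdio.h>

#define PHY_SRC_IDX_LE(i)	(i)		/* little-endian: identity */
#define PHY_SRC_IDX_BE(i)	((i) ^ 1)	/* big-endian: pairwise swap */

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("src %d -> LE slot %d, BE slot %d\n",
		       i, PHY_SRC_IDX_LE(i), PHY_SRC_IDX_BE(i));
	return 0;
}
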