author      Linus Torvalds <torvalds@linux-foundation.org>   2022-06-03 11:36:34 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>   2022-06-03 11:36:34 -0700
commit      6f9b5ed8caddfbc94af8307c557ed57a8ec5c65c (patch)
tree        dda90fb07a35fc07cf0cedc22f0f6be61b29cf53 /drivers
parent      54c2cc79194c961a213c1d375fe3aa4165664cc4 (diff)
parent      90de6805267f8c79cd2b1a36805071e257c39b5c (diff)
Merge tag 'char-misc-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc / other smaller driver subsystem updates from Greg KH:
"Here is the large set of char, misc, and other driver subsystem
updates for 5.19-rc1. The merge request for this was delayed because I
wanted to get lots of linux-next testing, given some late-arriving
changes to the habanalabs driver.
Highlights of this merge are:
- habanalabs driver updates for new hardware types, plus fixes and
other updates
- IIO driver tree merge, which includes loads of new IIO drivers,
cleanups, and additions
- PHY driver tree merge with new drivers and small updates to
existing ones
- interconnect driver tree merge with fixes and updates
- soundwire driver tree merge with some small fixes
- coresight driver tree merge with small fixes and updates
- mhi bus driver tree merge with lots of updates and new device
support
- firmware driver updates
- fpga driver updates
- lkdtm driver updates (with a merge conflict, more on that below)
- extcon driver tree merge with small updates
- lots of other tiny driver updates, fixes, and cleanups; full
details are in the shortlog.
All of these have been in linux-next for almost 2 weeks with no
reported problems"
* tag 'char-misc-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (387 commits)
habanalabs: use separate structure info for each error collect data
habanalabs: fix missing handle shift during mmap
habanalabs: remove hdev from hl_ctx_get args
habanalabs: do MMU prefetch as deferred work
habanalabs: order memory manager messages
habanalabs: return -EFAULT on copy_to_user error
habanalabs: use NULL for eventfd
habanalabs: update firmware header
habanalabs: add support for notification via eventfd
habanalabs: add topic to memory manager buffer
habanalabs: handle race in driver fini
habanalabs: add device memory scrub ability through debugfs
habanalabs: use unified memory manager for CB flow
habanalabs: unified memory manager new code for CB flow
habanalabs/gaudi: set arbitration timeout to a high value
habanalabs: add put by handle method to memory manager
habanalabs: hide memory manager page shift
habanalabs: Add separate poll interval value for protocol
habanalabs: use get_task_pid() to take PID
habanalabs: add prefetch flag to the MAP operation
...
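The shortlog entry "habanalabs: return -EFAULT on copy_to_user error" above applies a standard kernel convention worth spelling out: copy_to_user() returns the number of bytes it could NOT copy, and callers are expected to fold any nonzero remainder into -EFAULT rather than propagating the raw byte count to userspace. A minimal illustrative sketch (the helper name is invented, not from the habanalabs patch):

#include <linux/uaccess.h>

/*
 * Illustrative only: a nonzero return from copy_to_user() means some
 * bytes were left uncopied, so it is mapped to -EFAULT instead of
 * being returned as-is.
 */
static long my_copy_result(void __user *ubuf, const void *kbuf, size_t len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;

	return 0;
}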
Diffstat (limited to 'drivers')
286 files changed, 14013 insertions, 4413 deletions
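In the android/binder portion of the diff below, the binder_debug() and binder_user_error() macros are converted into proper printf-style functions built on struct va_format and printk's "%pV" specifier, so each message is emitted by a single ratelimited printk call instead of expanding the format at every call site. A minimal sketch of that idiom, with an invented debug mask and helper name:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/stdarg.h>

static unsigned int my_debug_mask = 1;	/* illustrative module knob */
#define MY_DEBUG_FOO 1

/*
 * Forward the caller's format string and arguments through
 * struct va_format; "%pV" tells printk to expand them in one pass.
 */
static __printf(2, 3) void my_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (!(my_debug_mask & mask))
		return;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;
	pr_info_ratelimited("%pV", &vaf);
	va_end(args);
}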
diff --git a/drivers/accessibility/speakup/fakekey.c b/drivers/accessibility/speakup/fakekey.c index cd029968462f..868c47b2a59b 100644 --- a/drivers/accessibility/speakup/fakekey.c +++ b/drivers/accessibility/speakup/fakekey.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* fakekey.c - * Functions for simulating keypresses. + * Functions for simulating key presses. * * Copyright (C) 2010 the Speakup Team */ @@ -78,7 +78,7 @@ void speakup_fake_down_arrow(void) } /* - * Are we handling a simulated keypress on the current CPU? + * Are we handling a simulated key press on the current CPU? * Returns a boolean. */ bool speakup_fake_key_pressed(void) diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c index 53580bdc5baa..3418ea31d28f 100644 --- a/drivers/accessibility/speakup/serialio.c +++ b/drivers/accessibility/speakup/serialio.c @@ -59,7 +59,7 @@ const struct old_serial_port *spk_serial_init(int index) } ser = rs_table + index; - /* Divisor, bytesize and parity */ + /* Divisor, byte size and parity */ quot = ser->baud_base / baud; cval = cflag & (CSIZE | CSTOPB); #if defined(__powerpc__) || defined(__alpha__) diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c index 023172ca22ef..a55b60754eb1 100644 --- a/drivers/accessibility/speakup/speakup_acntpc.c +++ b/drivers/accessibility/speakup/speakup_acntpc.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * this code is specificly written as a driver for the speakup screenreview + * this code is specifically written as a driver for the speakup screenreview * package and is not a general device driver. * This driver is for the Aicom Acent PC internal synthesizer. */ diff --git a/drivers/accessibility/speakup/speakup_acntsa.c b/drivers/accessibility/speakup/speakup_acntsa.c index 3a863dc61286..2697c51ed6b5 100644 --- a/drivers/accessibility/speakup/speakup_acntsa.c +++ b/drivers/accessibility/speakup/speakup_acntsa.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * this code is specificly written as a driver for the speakup screenreview + * this code is specifically written as a driver for the speakup screenreview * package and is not a general device driver. */ diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c index cd63581b2e99..c84a7e0864b7 100644 --- a/drivers/accessibility/speakup/speakup_apollo.c +++ b/drivers/accessibility/speakup/speakup_apollo.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * this code is specificly written as a driver for the speakup screenreview + * this code is specifically written as a driver for the speakup screenreview * package and is not a general device driver. */ #include <linux/jiffies.h> diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c index a0c3b8ae17a1..4d16d60db9b2 100644 --- a/drivers/accessibility/speakup/speakup_audptr.c +++ b/drivers/accessibility/speakup/speakup_audptr.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. 
*/ #include "spk_priv.h" diff --git a/drivers/accessibility/speakup/speakup_bns.c b/drivers/accessibility/speakup/speakup_bns.c index 76dfa3f7c058..b8103eb117b8 100644 --- a/drivers/accessibility/speakup/speakup_bns.c +++ b/drivers/accessibility/speakup/speakup_bns.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * this code is specificly written as a driver for the speakup screenreview + * this code is specifically written as a driver for the speakup screenreview * package and is not a general device driver. */ #include "spk_priv.h" diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c index 092cfd08a9e1..eaebf62300a4 100644 --- a/drivers/accessibility/speakup/speakup_decext.c +++ b/drivers/accessibility/speakup/speakup_decext.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. */ #include <linux/jiffies.h> diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index 78ca4987e619..2a7e8d727904 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. */ #include <linux/unistd.h> diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c index a9dd5c45d237..6f01e010aaf4 100644 --- a/drivers/accessibility/speakup/speakup_dtlk.c +++ b/drivers/accessibility/speakup/speakup_dtlk.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * package it's not a general device driver. * This driver is for the RC Systems DoubleTalk PC internal synthesizer. */ diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c index 63c2f2943282..34f11cd47073 100644 --- a/drivers/accessibility/speakup/speakup_dummy.c +++ b/drivers/accessibility/speakup/speakup_dummy.c @@ -8,7 +8,7 @@ * Copyright (C) 2003 David Borowski. * Copyright (C) 2007 Samuel Thibault. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. */ #include "spk_priv.h" diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c index 1618be87bff1..f61b62f1ea4d 100644 --- a/drivers/accessibility/speakup/speakup_keypc.c +++ b/drivers/accessibility/speakup/speakup_keypc.c @@ -4,7 +4,7 @@ * * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * package it's not a general device driver. * This driver is for the Keynote Gold internal synthesizer. 
*/ diff --git a/drivers/accessibility/speakup/speakup_ltlk.c b/drivers/accessibility/speakup/speakup_ltlk.c index 3e59b387d0c4..f885cfaa27c8 100644 --- a/drivers/accessibility/speakup/speakup_ltlk.c +++ b/drivers/accessibility/speakup/speakup_ltlk.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. */ #include "speakup.h" diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c index 19824e7006fe..99f1d4ac426a 100644 --- a/drivers/accessibility/speakup/speakup_soft.c +++ b/drivers/accessibility/speakup/speakup_soft.c @@ -5,7 +5,7 @@ * * Copyright (C) 2003 Kirk Reiser. * - * this code is specificly written as a driver for the speakup screenreview + * this code is specifically written as a driver for the speakup screenreview * package and is not a general device driver. */ @@ -397,6 +397,7 @@ static int softsynth_probe(struct spk_synth *synth) synthu_device.name = "softsynthu"; synthu_device.fops = &softsynthu_fops; if (misc_register(&synthu_device)) { + misc_deregister(&synth_device); pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n"); return -ENODEV; } diff --git a/drivers/accessibility/speakup/speakup_spkout.c b/drivers/accessibility/speakup/speakup_spkout.c index bd3d8dc300ff..5e3bb3aa98b6 100644 --- a/drivers/accessibility/speakup/speakup_spkout.c +++ b/drivers/accessibility/speakup/speakup_spkout.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. */ #include "spk_priv.h" diff --git a/drivers/accessibility/speakup/speakup_txprt.c b/drivers/accessibility/speakup/speakup_txprt.c index a7326f226a5e..9e781347f7eb 100644 --- a/drivers/accessibility/speakup/speakup_txprt.c +++ b/drivers/accessibility/speakup/speakup_txprt.c @@ -6,7 +6,7 @@ * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * - * specificly written as a driver for the speakup screenreview + * specifically written as a driver for the speakup screenreview * s not a general device driver. */ #include "spk_priv.h" diff --git a/drivers/android/binder.c b/drivers/android/binder.c index f3b639e89dd8..77d0b17cb646 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -133,18 +133,45 @@ static int binder_set_stop_on_user_error(const char *val, module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, 0644); -#define binder_debug(mask, x...) \ - do { \ - if (binder_debug_mask & mask) \ - pr_info_ratelimited(x); \ - } while (0) +static __printf(2, 3) void binder_debug(int mask, const char *format, ...) +{ + struct va_format vaf; + va_list args; + + if (binder_debug_mask & mask) { + va_start(args, format); + vaf.va = &args; + vaf.fmt = format; + pr_info_ratelimited("%pV", &vaf); + va_end(args); + } +} + +#define binder_txn_error(x...) \ + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x) + +static __printf(1, 2) void binder_user_error(const char *format, ...) 
+{ + struct va_format vaf; + va_list args; + + if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) { + va_start(args, format); + vaf.va = &args; + vaf.fmt = format; + pr_info_ratelimited("%pV", &vaf); + va_end(args); + } + + if (binder_stop_on_user_error) + binder_stop_on_user_error = 2; +} -#define binder_user_error(x...) \ +#define binder_set_extended_error(ee, _id, _command, _param) \ do { \ - if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ - pr_info_ratelimited(x); \ - if (binder_stop_on_user_error) \ - binder_stop_on_user_error = 2; \ + (ee)->id = _id; \ + (ee)->command = _command; \ + (ee)->param = _param; \ } while (0) #define to_flat_binder_object(hdr) \ @@ -1481,6 +1508,8 @@ static void binder_free_txn_fixups(struct binder_transaction *t) list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { fput(fixup->file); + if (fixup->target_fd >= 0) + put_unused_fd(fixup->target_fd); list_del(&fixup->fixup_entry); kfree(fixup); } @@ -2220,6 +2249,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset, } fixup->file = file; fixup->offset = fd_offset; + fixup->target_fd = -1; trace_binder_transaction_fd_send(t, fd, fixup->offset); list_add_tail(&fixup->fixup_entry, &t->fd_fixups); @@ -2705,6 +2735,24 @@ static struct binder_node *binder_get_node_refs_for_txn( return target_node; } +static void binder_set_txn_from_error(struct binder_transaction *t, int id, + uint32_t command, int32_t param) +{ + struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); + + if (!from) { + /* annotation for sparse */ + __release(&from->proc->inner_lock); + return; + } + + /* don't override existing errors */ + if (from->ee.command == BR_OK) + binder_set_extended_error(&from->ee, id, command, param); + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -2750,6 +2798,10 @@ static void binder_transaction(struct binder_proc *proc, e->offsets_size = tr->offsets_size; strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); + binder_inner_proc_unlock(proc); + if (reply) { binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; @@ -2785,6 +2837,8 @@ static void binder_transaction(struct binder_proc *proc, if (target_thread == NULL) { /* annotation for sparse */ __release(&target_thread->proc->inner_lock); + binder_txn_error("%d:%d reply target not found\n", + thread->pid, proc->pid); return_error = BR_DEAD_REPLY; return_error_line = __LINE__; goto err_dead_binder; @@ -2850,6 +2904,8 @@ static void binder_transaction(struct binder_proc *proc, } } if (!target_node) { + binder_txn_error("%d:%d cannot find target node\n", + thread->pid, proc->pid); /* * return_error is set above */ @@ -2859,6 +2915,8 @@ static void binder_transaction(struct binder_proc *proc, } e->to_node = target_node->debug_id; if (WARN_ON(proc == target_proc)) { + binder_txn_error("%d:%d self transactions not allowed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; @@ -2866,6 +2924,8 @@ static void binder_transaction(struct binder_proc *proc, } if (security_binder_transaction(proc->cred, target_proc->cred) < 0) { + binder_txn_error("%d:%d transaction credentials failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EPERM; 
return_error_line = __LINE__; @@ -2937,6 +2997,8 @@ static void binder_transaction(struct binder_proc *proc, /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { + binder_txn_error("%d:%d cannot allocate transaction\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; return_error_line = __LINE__; @@ -2948,6 +3010,8 @@ static void binder_transaction(struct binder_proc *proc, tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { + binder_txn_error("%d:%d cannot allocate work for transaction\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; return_error_line = __LINE__; @@ -2994,6 +3058,8 @@ static void binder_transaction(struct binder_proc *proc, security_cred_getsecid(proc->cred, &secid); ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); if (ret) { + binder_txn_error("%d:%d failed to get security context\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3002,7 +3068,8 @@ static void binder_transaction(struct binder_proc *proc, added_size = ALIGN(secctx_sz, sizeof(u64)); extra_buffers_size += added_size; if (extra_buffers_size < added_size) { - /* integer overflow of extra_buffers_size */ + binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; @@ -3016,9 +3083,15 @@ static void binder_transaction(struct binder_proc *proc, tr->offsets_size, extra_buffers_size, !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { - /* - * -ESRCH indicates VMA cleared. The target is dying. - */ + char *s; + + ret = PTR_ERR(t->buffer); + s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" + : (ret == -ENOSPC) ? ": no space left" + : (ret == -ENOMEM) ? ": memory allocation failed" + : ""; + binder_txn_error("cannot allocate buffer%s", s); + return_error_param = PTR_ERR(t->buffer); return_error = return_error_param == -ESRCH ? 
BR_DEAD_REPLY : BR_FAILED_REPLY; @@ -3101,6 +3174,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, buffer_offset, sizeof(object_offset))) { + binder_txn_error("%d:%d copy offset from buffer failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; @@ -3159,6 +3234,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, fp, sizeof(*fp))) { + binder_txn_error("%d:%d translate binder failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3176,6 +3253,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, fp, sizeof(*fp))) { + binder_txn_error("%d:%d translate handle failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3196,6 +3275,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, fp, sizeof(*fp))) { + binder_txn_error("%d:%d translate fd failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3265,6 +3346,8 @@ static void binder_transaction(struct binder_proc *proc, object_offset, fda, sizeof(*fda)); if (ret) { + binder_txn_error("%d:%d translate fd array failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret > 0 ? -EINVAL : ret; return_error_line = __LINE__; @@ -3292,6 +3375,8 @@ static void binder_transaction(struct binder_proc *proc, (const void __user *)(uintptr_t)bp->buffer, bp->length); if (ret) { + binder_txn_error("%d:%d deferred copy failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3315,6 +3400,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, bp, sizeof(*bp))) { + binder_txn_error("%d:%d failed to fixup parent\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3422,6 +3509,8 @@ static void binder_transaction(struct binder_proc *proc, return; err_dead_proc_or_thread: + binder_txn_error("%d:%d dead process or thread\n", + thread->pid, proc->pid); return_error_line = __LINE__; binder_dequeue_work(proc, tcomplete); err_translate_failed: @@ -3457,21 +3546,26 @@ err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: - if (target_thread) - binder_thread_dec_tmpref(target_thread); - if (target_proc) - binder_proc_dec_tmpref(target_proc); if (target_node) { binder_dec_node(target_node, 1, 0); binder_dec_node_tmpref(target_node); } binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", - proc->pid, thread->pid, return_error, return_error_param, + "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", + proc->pid, thread->pid, reply ? "reply" : + (tr->flags & TF_ONE_WAY ? "async" : "call"), + target_proc ? target_proc->pid : 0, + target_thread ? 
target_thread->pid : 0, + t_debug_id, return_error, return_error_param, (u64)tr->data_size, (u64)tr->offsets_size, return_error_line); + if (target_thread) + binder_thread_dec_tmpref(target_thread); + if (target_proc) + binder_proc_dec_tmpref(target_proc); + { struct binder_transaction_log_entry *fe; @@ -3491,10 +3585,16 @@ err_invalid_target_handle: BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_set_txn_from_error(in_reply_to, t_debug_id, + return_error, return_error_param); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, + return_error, return_error_param); + binder_inner_proc_unlock(proc); thread->return_error.cmd = return_error; binder_enqueue_thread_work(thread, &thread->return_error.work); } @@ -3984,7 +4084,7 @@ static int binder_thread_write(struct binder_proc *proc, } break; default: - pr_err("%d:%d unknown command %d\n", + pr_err("%d:%d unknown command %u\n", proc->pid, thread->pid, cmd); return -EINVAL; } @@ -4075,10 +4175,9 @@ static int binder_wait_for_work(struct binder_thread *thread, * Now that we are in the context of the transaction target * process, we can allocate and install fds. Process the * list of fds to translate and fixup the buffer with the - * new fds. + * new fds first and only then install the files. * - * If we fail to allocate an fd, then free the resources by - * fput'ing files that have not been processed and ksys_close'ing + * If we fail to allocate an fd, skip the install and release * any fds that have already been allocated. */ static int binder_apply_fd_fixups(struct binder_proc *proc, @@ -4095,41 +4194,31 @@ static int binder_apply_fd_fixups(struct binder_proc *proc, "failed fd fixup txn %d fd %d\n", t->debug_id, fd); ret = -ENOMEM; - break; + goto err; } binder_debug(BINDER_DEBUG_TRANSACTION, "fd fixup txn %d fd %d\n", t->debug_id, fd); trace_binder_transaction_fd_recv(t, fd, fixup->offset); - fd_install(fd, fixup->file); - fixup->file = NULL; + fixup->target_fd = fd; if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, fixup->offset, &fd, sizeof(u32))) { ret = -EINVAL; - break; + goto err; } } list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { - if (fixup->file) { - fput(fixup->file); - } else if (ret) { - u32 fd; - int err; - - err = binder_alloc_copy_from_buffer(&proc->alloc, &fd, - t->buffer, - fixup->offset, - sizeof(fd)); - WARN_ON(err); - if (!err) - binder_deferred_fd_close(fd); - } + fd_install(fixup->target_fd, fixup->file); list_del(&fixup->fixup_entry); kfree(fixup); } return ret; + +err: + binder_free_txn_fixups(t); + return ret; } static int binder_thread_read(struct binder_proc *proc, @@ -4490,7 +4579,7 @@ retry: trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, - "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", + "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : (cmd == BR_TRANSACTION_SEC_CTX) ? 
@@ -4632,6 +4721,7 @@ static struct binder_thread *binder_get_thread_ilocked( thread->return_error.cmd = BR_OK; thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; thread->reply_error.cmd = BR_OK; + thread->ee.command = BR_OK; INIT_LIST_HEAD(&new_thread->waiting_thread_node); return thread; } @@ -5070,6 +5160,22 @@ static int binder_ioctl_get_freezer_info( return 0; } +static int binder_ioctl_get_extended_error(struct binder_thread *thread, + void __user *ubuf) +{ + struct binder_extended_error ee; + + binder_inner_proc_lock(thread->proc); + ee = thread->ee; + binder_set_extended_error(&thread->ee, 0, BR_OK, 0); + binder_inner_proc_unlock(thread->proc); + + if (copy_to_user(ubuf, &ee, sizeof(ee))) + return -EFAULT; + + return 0; +} + static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; @@ -5278,6 +5384,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } + case BINDER_GET_EXTENDED_ERROR: + ret = binder_ioctl_get_extended_error(thread, ubuf); + if (ret < 0) + goto err; + break; default: ret = -EINVAL; goto err; diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 2ac1008a5f39..5649a0371a1f 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1175,14 +1175,11 @@ static void binder_alloc_clear_buf(struct binder_alloc *alloc, unsigned long size; struct page *page; pgoff_t pgoff; - void *kptr; page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff); size = min_t(size_t, bytes, PAGE_SIZE - pgoff); - kptr = kmap(page) + pgoff; - memset(kptr, 0, size); - kunmap(page); + memset_page(page, pgoff, 0, size); bytes -= size; buffer_offset += size; } @@ -1220,9 +1217,9 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff); size = min_t(size_t, bytes, PAGE_SIZE - pgoff); - kptr = kmap(page) + pgoff; + kptr = kmap_local_page(page) + pgoff; ret = copy_from_user(kptr, from, size); - kunmap(page); + kunmap_local(kptr); if (ret) return bytes - size + ret; bytes -= size; @@ -1247,23 +1244,14 @@ static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, unsigned long size; struct page *page; pgoff_t pgoff; - void *tmpptr; - void *base_ptr; page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff); size = min_t(size_t, bytes, PAGE_SIZE - pgoff); - base_ptr = kmap_atomic(page); - tmpptr = base_ptr + pgoff; if (to_buffer) - memcpy(tmpptr, ptr, size); + memcpy_to_page(page, pgoff, ptr, size); else - memcpy(ptr, tmpptr, size); - /* - * kunmap_atomic() takes care of flushing the cache - * if this device has VIVT cache arch - */ - kunmap_atomic(base_ptr); + memcpy_from_page(ptr, page, pgoff, size); bytes -= size; pgoff = 0; ptr = ptr + size; diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index d6b6b8cb7346..8dc0bccf8513 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -480,6 +480,8 @@ struct binder_proc { * (only accessed by this thread) * @reply_error: transaction errors reported by target thread * (protected by @proc->inner_lock) + * @ee: extended error information from this thread + * (protected by @proc->inner_lock) * @wait: wait queue for thread work * @stats: per-thread statistics * (atomics, no lock needed) @@ -504,6 +506,7 @@ struct binder_thread { bool process_todo; struct binder_error return_error; struct binder_error reply_error; + struct 
binder_extended_error ee; wait_queue_head_t wait; struct binder_stats stats; atomic_t tmp_ref; @@ -515,6 +518,7 @@ struct binder_thread { * @fixup_entry: list entry * @file: struct file to be associated with new fd * @offset: offset in buffer data to this fixup + * @target_fd: fd to use by the target to install @file * * List element for fd fixups in a transaction. Since file * descriptors need to be allocated in the context of the @@ -525,6 +529,7 @@ struct binder_txn_fd_fixup { struct list_head fixup_entry; struct file *file; size_t offset; + int target_fd; }; struct binder_transaction { diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index e3605cdd4335..6c5e94f6cb3a 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -60,6 +60,7 @@ enum binderfs_stats_mode { struct binder_features { bool oneway_spam_detection; + bool extended_error; }; static const struct constant_table binderfs_param_stats[] = { @@ -75,6 +76,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = { static struct binder_features binder_features = { .oneway_spam_detection = true, + .extended_error = true, }; static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb) @@ -615,6 +617,12 @@ static int init_binder_features(struct super_block *sb) if (IS_ERR(dentry)) return PTR_ERR(dentry); + dentry = binderfs_create_file(dir, "extended_error", + &binder_features_fops, + &binder_features.extended_error); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + return 0; } diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index 4748df7f9cd5..b39a11e6c624 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -6,3 +6,4 @@ # source "drivers/bus/mhi/host/Kconfig" +source "drivers/bus/mhi/ep/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 5f5708a249f5..46981331b38f 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,2 +1,5 @@ # Host MHI stack obj-y += host/ + +# Endpoint MHI stack +obj-y += ep/ diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h index b4ef9acd3ce7..f794b9c8049e 100644 --- a/drivers/bus/mhi/common.h +++ b/drivers/bus/mhi/common.h @@ -165,6 +165,22 @@ #define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) #define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0))) +/* State change event */ +#define MHI_SC_EV_PTR 0 +#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state)) +#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* EE event */ +#define MHI_EE_EV_PTR 0 +#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee)) +#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + + +/* Command Completion event */ +#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code)) +#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + /* Transfer descriptor macros */ #define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) #define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len)) @@ -175,6 +191,12 @@ FIELD_PREP(BIT(9), ieot) | \ FIELD_PREP(BIT(8), ieob) | \ FIELD_PREP(BIT(0), chain)) +#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0)) +#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 
1)))) +#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1)))) /* RSC transfer descriptor macros */ #define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr) diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig new file mode 100644 index 000000000000..90ab3b040672 --- /dev/null +++ b/drivers/bus/mhi/ep/Kconfig @@ -0,0 +1,10 @@ +config MHI_BUS_EP + tristate "Modem Host Interface (MHI) bus Endpoint implementation" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by a host processor to control + and communicate a modem device over a high speed peripheral + bus or shared memory. + + MHI_BUS_EP implements the MHI protocol for the endpoint devices, + such as SDX55 modem connected to the host machine over PCIe. diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile new file mode 100644 index 000000000000..aad85f180b70 --- /dev/null +++ b/drivers/bus/mhi/ep/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o +mhi_ep-y := main.o mmio.o ring.o sm.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h new file mode 100644 index 000000000000..a2125fa5fe2f --- /dev/null +++ b/drivers/bus/mhi/ep/internal.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ + +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include <linux/bitfield.h> + +#include "../common.h" + +extern struct bus_type mhi_ep_bus_type; + +#define MHI_REG_OFFSET 0x100 +#define BHI_REG_OFFSET 0x200 + +/* MHI registers */ +#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN) +#define EP_MHIVER (MHI_REG_OFFSET + MHIVER) +#define EP_MHICFG (MHI_REG_OFFSET + MHICFG) +#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF) +#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF) +#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF) +#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF) +#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF) +#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL) +#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS) +#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER) +#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER) +#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER) +#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER) +#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER) +#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER) +#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER) +#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER) +#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER) +#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER) +#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER) +#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER) +#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER) +#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER) +#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER) +#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER) + +/* MHI BHI registers */ +#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC) +#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV) + +/* MHI Doorbell registers */ +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n)) 
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n)) + +#define MHI_CTRL_INT_STATUS 0x4 +#define MHI_CTRL_INT_STATUS_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) + +/* + * Unlike the usual "masking" convention, writing "1" to a bit in this register + * enables the interrupt and writing "0" will disable it.. + */ +#define MHI_CTRL_INT_MASK 0x94 +#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_CRDB_MASK BIT(1) + +#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0) + +#define NR_OF_CMD_RINGS 1 +#define MHI_MASK_ROWS_CH_DB 4 +#define MHI_MASK_ROWS_EV_DB 4 +#define MHI_MASK_CH_LEN 32 +#define MHI_MASK_EV_LEN 32 + +/* Generic context */ +struct mhi_generic_ctx { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD, + RING_TYPE_ER, + RING_TYPE_CH, +}; + +/* Ring element */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctxt cmd; + struct mhi_event_ctxt ev; + struct mhi_chan_ctxt ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring_item { + struct list_head node; + struct mhi_ep_ring *ring; +}; + +struct mhi_ep_ring { + struct mhi_ep_cntrl *mhi_cntrl; + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ring_element *ring_cache; + enum mhi_ep_ring_type type; + u64 rbase; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; + u32 er_index; + u32 irq_vector; + bool started; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +/* MHI Ring related functions */ +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id); +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring); +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx); +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr); +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element); +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring); +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring); + +/* MMIO related functions */ +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); +void mhi_ep_mmio_write(struct 
mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val); +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000000..40109a79017a --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,1591 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Endpoint bus stack + * + * Copyright (C) 2022 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/dma-direction.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/mhi_ep.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include "internal.h" + +#define M0_WAIT_DELAY_MS 100 +#define M0_WAIT_COUNT 100 + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +static int mhi_ep_destroy_device(struct device *dev, void *data); + +static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, + struct mhi_ring_element *el, bool bei) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + struct mhi_ep_ring *ring; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ring = &mhi_cntrl->mhi_event[ring_idx].ring; + ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; + if (!ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + dev_err(dev, "Error starting event ring (%u)\n", ring_idx); + goto err_unlock; + } + } + + /* Add element to the event ring */ + ret = mhi_ep_ring_add_element(ring, el); + if (ret) { + dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); + goto err_unlock; + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* + * Raise IRQ to host only if the BEI flag is not set in TRE. Host might + * set this flag for interrupt moderation as per MHI protocol. + */ + if (!bei) + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) +{ + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event.dword[0] = MHI_TRE_EV_DWORD0(code, len); + event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + + return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_SC_EV_DWORD0(state); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_EE_EV_DWORD0(exec_env); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event.dword[0] = MHI_CC_EV_DWORD0(code); + event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_ring 
*ch_ring; + u32 tmp, ch_id; + int ret; + + ch_id = MHI_TRE_GET_CMD_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_TRE_GET_CMD_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (!ch_ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", ret); + + goto err_unlock; + } + } + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. + */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%u)\n", ch_id); + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + } + + /* Finally, enable DB for the channel */ + mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); + + /* Send channel disconnect status to client drivers */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to DISABLED */ + mhi_chan->state 
= MHI_CH_STATE_DISABLED; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + default: + dev_err(dev, "Invalid command received: %lu for channel (%u)\n", + MHI_TRE_GET_CMD_TYPE(el), ch_id); + return -EINVAL; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t tr_len, read_offset, write_offset; + struct mhi_ring_element *el; + bool tr_done = false; + void *write_addr; + u64 read_addr; + u32 buf_left; + int ret; + + buf_left = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + return -ENODEV; + } + + el = &ring->ring_cache[ring->rd_offset]; + + /* Check if there is data pending to be read from previous read operation */ + if (mhi_chan->tre_bytes_left) { + dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); + tr_len = min(buf_left, mhi_chan->tre_bytes_left); + } else { + mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); + mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + tr_len = min(buf_left, mhi_chan->tre_size); + } + + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + write_offset = len - buf_left; + read_addr = mhi_chan->tre_loc + read_offset; + write_addr = result->buf_addr + write_offset; + + dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); + return ret; + } + + buf_left -= tr_len; + mhi_chan->tre_bytes_left -= tr_len; + + /* + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been + * read completely: + * + * 1. Send completion event to the host based on the flags set in TRE. + * 2. Increment the local read offset of the transfer ring. + */ + if (!mhi_chan->tre_bytes_left) { + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. 
+ */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + + tr_done = true; + } + + mhi_ep_ring_inc_index(ring); + } + + result->bytes_xferd += tr_len; + } while (buf_left && !tr_done); + + return 0; +} + +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret; + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. + */ + if (!mhi_chan->xfer_cb) { + dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); + return -ENODEV; + } + + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) + return -ENOMEM; + + do { + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + kfree(result.buf_addr); + return ret; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + result.bytes_xferd = 0; + memset(result.buf_addr, 0, len); + + /* Read until the ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + + kfree(result.buf_addr); + } + + return 0; +} + +/* TODO: Handle partially formed TDs */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) +{ + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ring_element *el; + u32 buf_left, read_offset; + struct mhi_ep_ring *ring; + enum mhi_ev_ccs code; + void *read_addr; + u64 write_addr; + size_t tr_len; + u32 tre_len; + int ret; + + buf_left = skb->len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { + dev_err(dev, "TRE not available!\n"); + ret = -ENOSPC; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_TRE_DATA_GET_LEN(el); + + tr_len = min(buf_left, tre_len); + read_offset = skb->len - buf_left; + read_addr = skb->data + read_offset; + write_addr = MHI_TRE_DATA_GET_PTR(el); + + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); + 
goto err_exit; + } + + buf_left -= tr_len; + /* + * For all TREs queued by the host for DL channel, only the EOT flag will be set. + * If the packet doesn't fit into a single TRE, send the OVERFLOW event to + * the host so that the host can adjust the packet boundary to next TREs. Else send + * the EOT event to the host indicating the packet boundary. + */ + if (buf_left) + code = MHI_EV_CC_OVERFLOW; + else + code = MHI_EV_CC_EOT; + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (buf_left); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate and map memory for caching host channel context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, + &mhi_cntrl->ch_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ch_ctx_cache, + ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); + return ret; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate and map memory for caching host event context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, + &mhi_cntrl->ev_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ev_ctx_cache, + ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate and map memory for caching host command context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, + &mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->cmd_ctx_cache, + cmd_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Initialize command ring */ + ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx; + } + + return ret; + +err_cmd_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + +err_ev_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + +err_ch_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); + + return ret; +} + +static void 
mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + /* + * Doorbell interrupts are enabled when the corresponding channel gets started. + * Enabling all interrupts here triggers spurious irqs as some of the interrupts + * associated with hw channels always get triggered. + */ + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + bool mhi_reset; + u32 count = 0; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(M0_WAIT_DELAY_MS); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_info(dev, "Detected Host reset while waiting for M0\n"); + } + count++; + } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); + + if (state != MHI_STATE_M0) { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + +static void mhi_ep_cmd_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring_element *el; + int ret; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) + return; + + /* + * Process command ring elements till the write offset. In case of an error, just try to + * process the next element. 
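The ring workers here share one pattern: fetch the host's write offset, then walk the locally cached ring from the read offset up to it, wrapping modulo the ring size. A minimal stand-alone sketch of that walk, using hypothetical values rather than real driver state:

        /* Sketch of the circular walk used by the ring workers (not driver code) */
        unsigned int ring_size = 8;
        unsigned int rd_offset = 6, wr_offset = 2;      /* the writer has wrapped around */

        while (rd_offset != wr_offset) {
                /* process ring_cache[rd_offset] here */
                rd_offset = (rd_offset + 1) % ring_size; /* what mhi_ep_ring_inc_index() does */
        }

Elements are consumed strictly in order; the walk terminates once the read offset catches up with the host's write offset.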
+ */ + while (ring->rd_offset != ring->wr_offset) { + el = &ring->ring_cache[ring->rd_offset]; + + ret = mhi_ep_process_cmd_ring(ring, el); + if (ret) + dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); + + mhi_ep_ring_inc_index(ring); + } +} + +static void mhi_ep_ch_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_item *itr, *tmp; + struct mhi_ring_element *el; + struct mhi_ep_ring *ring; + struct mhi_ep_chan *chan; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + /* Process each queued channel ring. In case of an error, just process the next element. */ + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + ring = itr->ring; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + kfree(itr); + continue; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) { + kfree(itr); + continue; + } + + el = &ring->ring_cache[ring->rd_offset]; + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + mutex_lock(&chan->lock); + dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); + ret = mhi_ep_process_ch_ring(ring, el); + if (ret) { + dev_err(dev, "Error processing ring for channel (%u): %d\n", + ring->ch_id, ret); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + mutex_unlock(&chan->lock); + kfree(itr); + } +} + +static void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + mhi_state_str(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); + break; + } + kfree(itr); + } +} + +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, + u32 ch_idx) +{ + struct mhi_ep_ring_item *item; + struct mhi_ep_ring *ring; + bool work = !!ch_int; + LIST_HEAD(head); + u32 i; + + /* First add the ring items to a local list */ + for_each_set_bit(i, &ch_int, 32) { + /* Channel index varies for each register: 0, 32, 64, 96 */ + u32 ch_id = ch_idx + i; + + ring = &mhi_cntrl->mhi_chan[ch_id].ring; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->ring = ring; + list_add_tail(&item->node, &head); + } + + /* Now, splice the local list into ch_db_list and queue the work item */ + if (work) { + spin_lock(&mhi_cntrl->list_lock); + list_splice_tail_init(&head, 
&mhi_cntrl->ch_db_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); + } +} + +/* + * Channel interrupt statuses are contained in 4 registers, each 32 bits wide. + * For checking all interrupts, we need to loop through each register and then + * check for bits set. + */ +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ch_int, ch_idx, i; + + /* Bail out if there is no channel doorbell interrupt */ + if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) + return; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + ch_idx = i * MHI_MASK_CH_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; + if (ch_int) { + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->state = state; + spin_lock(&mhi_cntrl->list_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. + */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value; + bool mhi_reset; + + /* Acknowledge the ctrl interrupt */ + int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for command doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { + dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); + } + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the 
transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (!ev_ring->started) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_reset(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->enabled = false; +} + +static void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state cur_state; + int ret; + + mhi_ep_abort_transfer(mhi_cntrl); + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* + * Only proceed further if the reset is due to SYS_ERR. The host will + * also issue reset during shutdown, and we don't need to do re-init in + * that case. + */ + if (cur_state == MHI_STATE_SYS_ERR) { + mhi_ep_mmio_init(mhi_cntrl); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + return; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + return; + } + + enable_irq(mhi_cntrl->irq); + } +} + +/* + * We don't need to do anything special other than setting the MHI SYS_ERR + * state. The host will reset all contexts and issue MHI RESET so that we + * can also recover from the error state. + */ +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + return; + + /* Signal host that the device went to SYS_ERR state */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); +} + +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). 
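This power-up path is meant to be driven by a controller (glue) driver, such as a PCI endpoint function driver. A minimal bring-up sketch, hedged since the glue layer is not part of this file; my_glue_dev and my_mhi_config are hypothetical, and the controller is assumed to have its MMIO base, IRQ and host-access callbacks already populated:

        /* Hypothetical glue code, shown only to illustrate the call order */
        static int my_glue_mhi_start(struct my_glue_dev *gdev)
        {
                struct mhi_ep_cntrl *mhi_cntrl = &gdev->mhi_cntrl;
                int ret;

                /* Registers the controller device, allocates channel/command rings */
                ret = mhi_ep_register_controller(mhi_cntrl, &my_mhi_config);
                if (ret)
                        return ret;

                /* Starts the state machine once the link to the host is up */
                ret = mhi_ep_power_up(mhi_cntrl);
                if (ret)
                        mhi_ep_unregister_controller(mhi_cntrl);

                return ret;
        }

Teardown is the mirror image: mhi_ep_power_down() followed by mhi_ep_unregister_controller(), as the comment above the latter function notes further below.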
+ */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + mhi_cntrl->mhi_state = MHI_STATE_RESET; + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint\n"); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + mhi_cntrl->enabled = true; + + return 0; + +err_free_event: + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->enabled) + mhi_ep_abort_transfer(mhi_cntrl); + + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently running */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); + /* Set channel state to SUSPENDED */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently suspended */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); + /* Set channel state to RUNNING */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + mhi_dev->mhi_cntrl->mhi_dev = NULL; + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created in mhi_ep_create_device() + * if the mhi_dev associated with them is NULL. 
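The mhi_dev back-pointers being cleared here are one half of the client-device model: each channel pair is represented by a single mhi_ep_device that client drivers bind to. For context, a client driver for this bus would look roughly like the sketch below; it is hedged, since no client is part of this commit, and the "LOOPBACK" channel and driver name are just examples:

        static void my_ul_cb(struct mhi_ep_device *mhi_dev, struct mhi_result *result)
        {
                /* Data read from the host (UL direction) is delivered here */
        }

        static void my_dl_cb(struct mhi_ep_device *mhi_dev, struct mhi_result *result)
        {
                /* DL completion and disconnect status land here */
        }

        static int my_probe(struct mhi_ep_device *mhi_dev, const struct mhi_device_id *id)
        {
                return 0;
        }

        static void my_remove(struct mhi_ep_device *mhi_dev)
        {
        }

        static const struct mhi_device_id my_id_table[] = {
                { .chan = "LOOPBACK" }, /* example channel name */
                {}
        };

        static struct mhi_ep_driver my_ep_driver = {
                .id_table = my_id_table,
                .probe = my_probe,
                .remove = my_remove,
                .ul_xfer_cb = my_ul_cb,
                .dl_xfer_cb = my_dl_cb,
                .driver = { .name = "my_mhi_ep_client" },
        };

Both transfer callbacks must be provided: __mhi_ep_driver_register(), shown further below, rejects a driver that leaves either of them unset.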
+ */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_device_type dev_type) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + /* Controller device is always allocated first */ + if (dev_type == MHI_DEVICE_CONTROLLER) + /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + else + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_type = dev_type; + + return mhi_dev; +} + +/* + * MHI channels are always defined in pairs with UL as the even numbered + * channel and DL as the odd numbered one. This function takes the UL channel + * (primary) as ch_id and always looks at the next entry in the channel list for + * the corresponding DL channel (secondary). + */ +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + struct device *dev = mhi_cntrl->cntrl_dev; + struct mhi_ep_device *mhi_dev; + int ret; + + /* Check if the channel name is the same for both UL and DL */ + if (strcmp(mhi_chan->name, mhi_chan[1].name)) { + dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n", + mhi_chan->name, mhi_chan[1].name); + return -EINVAL; + } + + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); + if (IS_ERR(mhi_dev)) + return PTR_ERR(mhi_dev); + + /* Configure primary channel */ + mhi_dev->ul_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Configure secondary channel as well */ + mhi_chan++; + mhi_dev->dl_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Channel name is the same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + + return ret; +} + +static int mhi_ep_destroy_device(struct device *dev, void *data) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl; + struct mhi_ep_chan *ul_chan, *dl_chan; + + if (dev->bus != &mhi_ep_bus_type) + return 0; + + mhi_dev = to_mhi_ep_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy devices created for channels */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + put_device(&ul_chan->mhi_dev->dev); + + if (dl_chan) + put_device(&dl_chan->mhi_dev->dev); + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate max_channels supported by the MHI endpoint and populate + * only the 
defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", + chan, mhi_cntrl->max_chan); + goto error_chan_cfg; + } + + /* Bi-directional and directionless channels are not supported */ + if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { + dev_err(dev, "Invalid direction (%u) for channel (%u)\n", + ch_cfg->dir, chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_power_up() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) + return -EINVAL; + + ret = mhi_ep_chan_init(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); + INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); + + mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + if (!mhi_cntrl->wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); + spin_lock_init(&mhi_cntrl->state_lock); + spin_lock_init(&mhi_cntrl->list_lock); + mutex_init(&mhi_cntrl->event_lock); + + /* Set MHI version and AMSS EE before enumeration */ + mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Set controller index */ + ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (ret < 0) + goto err_destroy_wq; + + mhi_cntrl->index = ret; + + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); + goto err_ida_free; + } + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_free_irq; + } + + dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + mhi_cntrl->mhi_dev = mhi_dev; + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_put_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_put_dev: + put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->wq); +err_free_cmd: + 
kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + destroy_workqueue(mhi_cntrl->wq); + + free_irq(mhi_cntrl->irq, mhi_cntrl); + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + /* Client drivers should have callbacks defined for both channels */ + if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_ep_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if 
(!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, + .uevent = mhi_ep_uevent, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000000..b5bfd22f2c8e --- /dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/mhi_ep.h> + +#include "internal.h" + +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset) +{ + return readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, offset); + regval &= ~mask; + regval |= (val << __ffs(mask)) & mask; + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask) +{ + u32 regval; + + regval = mhi_ep_mmio_read(dev, offset); + regval &= mask; + regval >>= __ffs(mask); + + return regval; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable) +{ + u32 chid_mask, chid_shift, chdb_idx, val; + + chid_shift = ch_id % 32; + chid_mask = BIT(chid_shift); + chdb_idx = ch_id / 32; + + val = enable ? 1 : 0; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val); + + /* Update the local copy of the channel mask */ + mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask; + mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift; +} + +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true); +} + +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? 
MHI_CHDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + bool chdb = false; + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i)); + if (mhi_cntrl->chdb[i].status) + chdb = true; + } + + /* Return whether a channel doorbell interrupt occurred or not */ + return chdb; +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val); +} + +static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + MHI_CHDB_INT_CLEAR_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i), + MHI_ERDB_INT_CLEAR_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER); + mhi_cntrl->ch_ctx_host_pa = regval; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER); + mhi_cntrl->ch_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER); + mhi_cntrl->ev_ctx_host_pa = regval; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER); + mhi_cntrl->ev_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER); + mhi_cntrl->cmd_ctx_host_pa = regval; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + regval = 
mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER); + mhi_cntrl->cmd_ctx_host_pa |= regval; +} + +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u64 db_offset; + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h); + db_offset = regval; + db_offset <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l); + db_offset |= regval; + + return db_offset; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF); + mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF); + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); +} diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c new file mode 100644 index 000000000000..115518ec76a4 --- /dev/null +++ b/drivers/bus/mhi/ep/ring.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/mhi_ep.h> +#include "internal.h" + +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) +{ + return (ptr - ring->rbase) / sizeof(struct mhi_ring_element); +} + +static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) +{ + __le64 rlen; + + memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64)); + + return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element); +} + +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) +{ + ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size; +} + +static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t start, copy_size; + int ret; + + /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
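Because the cached window can wrap past the end of the ring, the copy below is done in up to two segments. A worked example with hypothetical numbers: with ring_size = 8, a cached wr_offset (start) of 6 and a new end of 2, the first read_from_host() call fetches elements 6..7 (ring_size - start = 2 elements) and the second fetches elements 0..1 (end = 2 elements), after which the local cache again mirrors the host ring.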
*/ + if (ring->type == RING_TYPE_ER) + return 0; + + /* No need to cache the ring if write pointer is unmodified */ + if (ring->wr_offset == end) + return 0; + + start = ring->wr_offset; + if (start < end) { + copy_size = (end - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + } else { + copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + + if (end) { + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, + &ring->ring_cache[0], + end * sizeof(struct mhi_ring_element)); + if (ret < 0) + return ret; + } + } + + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + + return 0; +} + +static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr) +{ + size_t wr_offset; + int ret; + + wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr); + + /* Cache the host ring till write offset */ + ret = __mhi_ep_cache_ring(ring, wr_offset); + if (ret) + return ret; + + ring->wr_offset = wr_offset; + + return 0; +} + +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring) +{ + u64 wr_ptr; + + wr_ptr = mhi_ep_mmio_get_db(ring); + + return mhi_ep_cache_ring(ring, wr_ptr); +} + +/* TODO: Support for adding multiple ring elements to the ring */ +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t old_offset = 0; + u32 num_free_elem; + __le64 rp; + int ret; + + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write pointer\n"); + return ret; + } + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = (ring->wr_offset - ring->rd_offset) - 1; + else + num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1; + + /* Check if there is space in ring for adding at least an element */ + if (!num_free_elem) { + dev_err(dev, "No space left in the ring\n"); + return -ENOSPC; + } + + old_offset = ring->rd_offset; + mhi_ep_ring_inc_index(ring); + + dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + + /* Update rp in ring context */ + rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); + memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); + + ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), + sizeof(*el)); + if (ret < 0) + return ret; + + return 0; +} + +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) +{ + ring->type = type; + if (ring->type == RING_TYPE_CMD) { + ring->db_offset_h = EP_CRDB_HIGHER; + ring->db_offset_l = EP_CRDB_LOWER; + } else if (ring->type == RING_TYPE_CH) { + ring->db_offset_h = CHDB_HIGHER_n(id); + ring->db_offset_l = CHDB_LOWER_n(id); + ring->ch_id = id; + } else { + ring->db_offset_h = ERDB_HIGHER_n(id); + ring->db_offset_l = ERDB_LOWER_n(id); + } +} + +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + __le64 val; + int ret; + + ring->mhi_cntrl = mhi_cntrl; + ring->ring_ctx = ctx; + ring->ring_size = mhi_ep_ring_num_elems(ring); + memcpy_fromio(&val, (void __iomem *) 
&ring->ring_ctx->generic.rbase, sizeof(u64)); + ring->rbase = le64_to_cpu(val); + + if (ring->type == RING_TYPE_CH) + ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); + + if (ring->type == RING_TYPE_ER) + ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + + /* During ring init, both rp and wp are equal */ + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); + ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + + /* Allocate ring cache memory for holding the copy of host ring */ + ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64)); + ret = mhi_ep_cache_ring(ring, le64_to_cpu(val)); + if (ret) { + dev_err(dev, "Failed to cache ring\n"); + kfree(ring->ring_cache); + return ret; + } + + ring->started = true; + + return 0; +} + +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) +{ + ring->started = false; + kfree(ring->ring_cache); + ring->ring_cache = NULL; +} diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000000..3655c19e23c7 --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/errno.h> +#include <linux/mhi_ep.h> +#include "internal.h" + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + if (mhi_state == MHI_STATE_SYS_ERR) + return true; /* Allowed in any state */ + + if (mhi_state == MHI_STATE_READY) + return cur_mhi_state == MHI_STATE_RESET; + + if (mhi_state == MHI_STATE_M0) + return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY; + + if (mhi_state == MHI_STATE_M3) + return cur_mhi_state == MHI_STATE_M0; + + return false; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s is not allowed!\n", + mhi_state_str(mhi_state), + mhi_state_str(mhi_cntrl->mhi_state)); + return -EACCES; + } + + /* TODO: Add support for M1 and M2 states */ + if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) { + dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state)); + return -EOPNOTSUPP; + } + + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state); + mhi_cntrl->mhi_state = mhi_state; + + if (mhi_state == MHI_STATE_READY) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1); + + if (mhi_state == MHI_STATE_SYS_ERR) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1); + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + /* If MHI is in M3, resume suspended channels */ + spin_lock_bh(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; + if (old_state == MHI_STATE_M3) + mhi_ep_resume_channels(mhi_cntrl); + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) { + 
mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + + /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed sending M0 state change event\n"); + return ret; + } + + if (old_state == MHI_STATE_READY) { + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event\n"); + return ret; + } + } + + return 0; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + + mhi_ep_suspend_channels(mhi_cntrl); + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event\n"); + return ret; + } + + return 0; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); + is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. MHI host not in RESET state\n"); + spin_unlock_bh(&mhi_cntrl->state_lock); + return -EIO; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + mhi_ep_handle_syserr(mhi_cntrl); + + return ret; +} diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c index b0da7ca4519c..26d0eddb1477 100644 --- a/drivers/bus/mhi/host/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -19,8 +19,8 @@ #include "internal.h" /* Setup RDDM vector table for RDDM transfer and program RXVEC */ -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, - struct image_info *img_info) +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) { struct mhi_buf *mhi_buf = img_info->mhi_buf; struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; @@ -28,6 +28,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 sequence_id; unsigned int i; + int ret; for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { bhi_vec->dma_addr = mhi_buf->dma_addr; @@ -45,11 +46,17 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); - mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, - BHIE_RXVECDB_SEQNUM_BMSK, sequence_id); + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, sequence_id); + if (ret) { + dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n"); + return ret; + } dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", &mhi_buf->dma_addr, mhi_buf->len, sequence_id); + + return 0; } /* Collect RDDM buffer during kernel panic */ @@ -198,10 +205,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); - 
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, - BHIE_TXVECDB_SEQNUM_BMSK, sequence_id); + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, sequence_id); read_unlock_bh(pm_lock); + if (ret) + return ret; + /* Wait for the image download to complete */ ret = wait_event_timeout(mhi_cntrl->state_event, MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index a665b8e92408..c137d55ccfa0 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -86,7 +86,7 @@ static ssize_t serial_number_show(struct device *dev, struct mhi_device *mhi_dev = to_mhi_device(dev); struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", + return sysfs_emit(buf, "Serial Number: %u\n", mhi_cntrl->serial_number); } static DEVICE_ATTR_RO(serial_number); @@ -100,17 +100,30 @@ static ssize_t oem_pk_hash_show(struct device *dev, int i, cnt = 0; for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) - cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, - "OEMPKHASH[%d]: 0x%x\n", i, - mhi_cntrl->oem_pk_hash[i]); + cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", + i, mhi_cntrl->oem_pk_hash[i]); return cnt; } static DEVICE_ATTR_RO(oem_pk_hash); +static ssize_t soc_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_soc_reset(mhi_cntrl); + return count; +} +static DEVICE_ATTR_WO(soc_reset); + static struct attribute *mhi_dev_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_oem_pk_hash.attr, + &dev_attr_soc_reset.attr, NULL, }; ATTRIBUTE_GROUPS(mhi_dev); @@ -425,74 +438,65 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) struct device *dev = &mhi_cntrl->mhi_dev->dev; struct { u32 offset; - u32 mask; u32 val; } reg_info[] = { { - CCABAP_HIGHER, U32_MAX, + CCABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), }, { - CCABAP_LOWER, U32_MAX, + CCABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), }, { - ECABAP_HIGHER, U32_MAX, + ECABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), }, { - ECABAP_LOWER, U32_MAX, + ECABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), }, { - CRCBAP_HIGHER, U32_MAX, + CRCBAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), }, { - CRCBAP_LOWER, U32_MAX, + CRCBAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), }, { - MHICFG, MHICFG_NER_MASK, - mhi_cntrl->total_ev_rings, - }, - { - MHICFG, MHICFG_NHWER_MASK, - mhi_cntrl->hw_ev_rings, - }, - { - MHICTRLBASE_HIGHER, U32_MAX, + MHICTRLBASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), }, { - MHICTRLBASE_LOWER, U32_MAX, + MHICTRLBASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), }, { - MHIDATABASE_HIGHER, U32_MAX, + MHIDATABASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), }, { - MHIDATABASE_LOWER, U32_MAX, + MHIDATABASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), }, { - MHICTRLLIMIT_HIGHER, U32_MAX, + MHICTRLLIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), }, { - MHICTRLLIMIT_LOWER, U32_MAX, + MHICTRLLIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), }, { - MHIDATALIMIT_HIGHER, U32_MAX, + MHIDATALIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), }, { - MHIDATALIMIT_LOWER, U32_MAX, + MHIDATALIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), }, - { 0, 0, 0 } + {0, 0} }; dev_dbg(dev, "Initializing MHI registers\n"); @@ -534,8 +538,22 
@@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) /* Write to MMIO registers */ for (i = 0; reg_info[i].offset; i++) - mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, - reg_info[i].mask, reg_info[i].val); + mhi_write_reg(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].val); + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK, + mhi_cntrl->total_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK, + mhi_cntrl->hw_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } return 0; } @@ -1103,8 +1121,15 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) */ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, mhi_cntrl->rddm_size); - if (mhi_cntrl->rddm_image) - mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + if (mhi_cntrl->rddm_image) { + ret = mhi_rddm_prepare(mhi_cntrl, + mhi_cntrl->rddm_image); + if (ret) { + mhi_free_bhie_table(mhi_cntrl, + mhi_cntrl->rddm_image); + goto error_reg_offset; + } + } } mutex_unlock(&mhi_cntrl->pm_mutex); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index b47d8ef2624a..01fd10a399b6 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -324,8 +324,9 @@ int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, u32 val, u32 delayus); void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 val); -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 val); +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val); void mhi_ring_er_db(struct mhi_event *mhi_event); void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, dma_addr_t db_val); @@ -339,7 +340,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 9021be7f2359..f3aef77a6a4a 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -65,19 +65,22 @@ void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); } -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 val) +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val) { int ret; u32 tmp; ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); if (ret) - return; + return ret; tmp &= ~mask; tmp |= (val << __ffs(mask)); mhi_write_reg(mhi_cntrl, base, offset, tmp); + + return 0; } void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, @@ -531,18 +534,13 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev) static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { - dma_addr_t ctxt_wp; - /* Update the WP */ ring->wp += ring->el_size; - ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + 
ring->el_size; - if (ring->wp >= (ring->base + ring->len)) { + if (ring->wp >= (ring->base + ring->len)) ring->wp = ring->base; - ctxt_wp = ring->iommu_base; - } - *ring->ctxt_wp = cpu_to_le64(ctxt_wp); + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); /* Update the RP */ ring->rp += ring->el_size; diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 541ced27d941..841626727f6b 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -371,7 +371,16 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .sideband_wake = false, }; -static const struct mhi_channel_config mhi_mv31_channels[] = { +static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = { + .name = "foxconn-sdx65", + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_mv3x_channels[] = { MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), /* MBIM Control Channel */ @@ -382,25 +391,33 @@ static const struct mhi_channel_config mhi_mv31_channels[] = { MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), }; -static struct mhi_event_config mhi_mv31_events[] = { +static struct mhi_event_config mhi_mv3x_events[] = { MHI_EVENT_CONFIG_CTRL(0, 256), MHI_EVENT_CONFIG_DATA(1, 256), MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), }; -static const struct mhi_controller_config modem_mv31_config = { +static const struct mhi_controller_config modem_mv3x_config = { .max_channels = 128, .timeout_ms = 20000, - .num_channels = ARRAY_SIZE(mhi_mv31_channels), - .ch_cfg = mhi_mv31_channels, - .num_events = ARRAY_SIZE(mhi_mv31_events), - .event_cfg = mhi_mv31_events, + .num_channels = ARRAY_SIZE(mhi_mv3x_channels), + .ch_cfg = mhi_mv3x_channels, + .num_events = ARRAY_SIZE(mhi_mv3x_events), + .event_cfg = mhi_mv3x_events, }; static const struct mhi_pci_dev_info mhi_mv31_info = { .name = "cinterion-mv31", - .config = &modem_mv31_config, + .config = &modem_mv3x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, +}; + +static const struct mhi_pci_dev_info mhi_mv32_info = { + .name = "cinterion-mv32", + .config = &modem_mv3x_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, .mru_default = 32768, @@ -446,20 +463,100 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { .sideband_wake = false, }; +static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), +}; + +static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), + .ch_cfg = mhi_telit_fn980_hw_v1_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), + .event_cfg = mhi_telit_fn980_hw_v1_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { + .name = 
"telit-fn980-hwv1", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_telit_fn980_hw_v1_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_telit_fn990_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_telit_fn990_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_telit_fn990_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), + .ch_cfg = mhi_telit_fn990_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn990_events), + .event_cfg = mhi_telit_fn990_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn990_info = { + .name = "telit-fn990", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + +/* Keep the list sorted based on the PID. New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + /* Telit FN980 hardware revision v1 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), + .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + /* Telit FN990 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, @@ -472,9 +569,21 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* T99W175 (sdx55), Based on Qualcomm new baseline */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + /* T99W368 (sdx65) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + /* T99W373 (sdx62) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9), + .driver_data = 
(kernel_ulong_t) &mhi_foxconn_sdx65_info }, /* MV31-W (Cinterion) */ { PCI_DEVICE(0x1269, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, + /* MV32-WA (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00ba), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + /* MV32-WB (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00bb), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, { } }; MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 3d90b8ecd3d9..dc2e8ff3bff2 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -129,13 +129,20 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) { + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + if (state == MHI_STATE_RESET) { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 1); + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, 1); } else { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_MHISTATE_MASK, state); + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, state); } + + if (ret) + dev_err(dev, "Failed to set MHI state to: %s\n", + mhi_state_str(state)); } /* NOP for backward compatibility, host allowed to ring DB in M2 state */ @@ -476,6 +483,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) * hence re-program it */ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + + if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { + /* wait for ready to be set */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, + MHISTATUS, + MHISTATUS_READY_MASK, 1, 25000); + if (ret) + dev_err(dev, "Device failed to enter READY state\n"); + } } dev_dbg(dev, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 18363aa2a49d..9a7d12332fad 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3395,7 +3395,9 @@ static int sysc_remove(struct platform_device *pdev) struct sysc *ddata = platform_get_drvdata(pdev); int error; - cancel_delayed_work_sync(&ddata->idle_work); + /* Device can still be enabled, see deferred idle quirk in probe */ + if (cancel_delayed_work_sync(&ddata->idle_work)) + ti_sysc_idle(&ddata->idle_work.work); error = pm_runtime_resume_and_get(ddata->dev); if (error < 0) { diff --git a/drivers/char/mem.c b/drivers/char/mem.c index cc296f0823bd..84ca98ed1dad 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -101,7 +101,7 @@ static inline bool should_stop_iteration(void) { if (need_resched()) cond_resched(); - return fatal_signal_pending(current); + return signal_pending(current); } /* diff --git a/drivers/char/misc.c b/drivers/char/misc.c index ca5141ed5ef3..cba19bfdc44d 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -100,17 +100,18 @@ static const struct seq_operations misc_seq_ops = { static int misc_open(struct inode *inode, struct file *file) { int minor = iminor(inode); - struct miscdevice *c; + struct miscdevice *c = NULL, *iter; int err = -ENODEV; const struct file_operations *new_fops = NULL; mutex_lock(&misc_mtx); - list_for_each_entry(c, &misc_list, list) { - if (c->minor == minor) { - new_fops = fops_get(c->fops); - break; - } + list_for_each_entry(iter, &misc_list, list) { + if (iter->minor != minor) + continue; + c = iter; + new_fops = fops_get(iter->fops); + break; } if (!new_fops) { @@ -118,11 +119,12 @@ static int misc_open(struct inode 
*inode, struct file *file) request_module("char-major-%d-%d", MISC_MAJOR, minor); mutex_lock(&misc_mtx); - list_for_each_entry(c, &misc_list, list) { - if (c->minor == minor) { - new_fops = fops_get(c->fops); - break; - } + list_for_each_entry(iter, &misc_list, list) { + if (iter->minor != minor) + continue; + c = iter; + new_fops = fops_get(iter->fops); + break; } if (!new_fops) goto fail; diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 0daf1bc95360..8fc49b038372 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -922,7 +922,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd) // BIT7:parity error // BIT6:framing error - if (status & (BIT7 + BIT6)) { + if (status & (BIT7 | BIT6)) { if (status & BIT7) icount->parity++; else diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c index 5046486011c8..0f238648dcfe 100644 --- a/drivers/char/xillybus/xillybus_class.c +++ b/drivers/char/xillybus/xillybus_class.c @@ -174,18 +174,17 @@ void xillybus_cleanup_chrdev(void *private_data, struct device *dev) { int minor; - struct xilly_unit *unit; - bool found = false; + struct xilly_unit *unit = NULL, *iter; mutex_lock(&unit_mutex); - list_for_each_entry(unit, &unit_list, list_entry) - if (unit->private_data == private_data) { - found = true; + list_for_each_entry(iter, &unit_list, list_entry) + if (iter->private_data == private_data) { + unit = iter; break; } - if (!found) { + if (!unit) { dev_err(dev, "Weird bug: Failed to find unit\n"); mutex_unlock(&unit_mutex); return; @@ -216,22 +215,21 @@ int xillybus_find_inode(struct inode *inode, { int minor = iminor(inode); int major = imajor(inode); - struct xilly_unit *unit; - bool found = false; + struct xilly_unit *unit = NULL, *iter; mutex_lock(&unit_mutex); - list_for_each_entry(unit, &unit_list, list_entry) - if (unit->major == major && - minor >= unit->lowest_minor && - minor < (unit->lowest_minor + unit->num_nodes)) { - found = true; + list_for_each_entry(iter, &unit_list, list_entry) + if (iter->major == major && + minor >= iter->lowest_minor && + minor < (iter->lowest_minor + iter->num_nodes)) { + unit = iter; break; } mutex_unlock(&unit_mutex); - if (!found) + if (!unit) return -ENODEV; *private_data = unit->private_data; diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c index dc3551796e5e..39bcbfd908b4 100644 --- a/drivers/char/xillybus/xillyusb.c +++ b/drivers/char/xillybus/xillyusb.c @@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref) if (xdev->workq) destroy_workqueue(xdev->workq); + usb_put_dev(xdev->udev); kfree(xdev->channels); /* Argument may be NULL, and that's fine */ kfree(xdev); } diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index 8eb1f699a857..d4e2ed709bfc 100644 --- a/drivers/comedi/drivers.c +++ b/drivers/comedi/drivers.c @@ -854,7 +854,7 @@ int comedi_load_firmware(struct comedi_device *dev, release_firmware(fw); } - return ret < 0 ? 
ret : 0; + return min(ret, 0); } EXPORT_SYMBOL_GPL(comedi_load_firmware); diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 005a82f671c3..0e5a5662d5a4 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -216,8 +216,11 @@ static int __init dio_init(void) /* Found a board, allocate it an entry in the list */ dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL); - if (!dev) + if (!dev) { + if (scode >= DIOII_SCBASE) + iounmap(va); return -ENOMEM; + } dev->bus = &dio_bus; dev->dev.parent = &dio_bus.dev; diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 0d42e49105dd..dca7cecb37e3 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -131,6 +131,7 @@ config EXTCON_PALMAS config EXTCON_PTN5150 tristate "NXP PTN5150 CC LOGIC USB EXTCON support" depends on I2C && (GPIOLIB || COMPILE_TEST) + depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH select REGMAP_I2C help Say Y here to enable support for USB peripheral and USB host @@ -156,7 +157,7 @@ config EXTCON_RT8973A from abnormal high input voltage (up to 28V). config EXTCON_SM5502 - tristate "Silicon Mitus SM5502/SM5504 EXTCON support" + tristate "Silicon Mitus SM5502/SM5504/SM5703 EXTCON support" depends on I2C select IRQ_DOMAIN select REGMAP_I2C diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index 7c6d5857ff25..180be768c215 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -394,8 +394,8 @@ static int axp288_extcon_probe(struct platform_device *pdev) if (adev) { info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev)); put_device(&adev->dev); - if (!info->id_extcon) - return -EPROBE_DEFER; + if (IS_ERR(info->id_extcon)) + return PTR_ERR(info->id_extcon); dev_info(dev, "controlling USB role\n"); } else { diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index fb527c23639e..ded1a85a5549 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -17,6 +17,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #define INT3496_GPIO_USB_ID 0 #define INT3496_GPIO_VBUS_EN 1 @@ -30,7 +31,9 @@ struct int3496_data { struct gpio_desc *gpio_usb_id; struct gpio_desc *gpio_vbus_en; struct gpio_desc *gpio_usb_mux; + struct regulator *vbus_boost; int usb_id_irq; + bool vbus_boost_enabled; }; static const unsigned int int3496_cable[] = { @@ -53,6 +56,27 @@ static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = { { }, }; +static void int3496_set_vbus_boost(struct int3496_data *data, bool enable) +{ + int ret; + + if (IS_ERR_OR_NULL(data->vbus_boost)) + return; + + if (data->vbus_boost_enabled == enable) + return; + + if (enable) + ret = regulator_enable(data->vbus_boost); + else + ret = regulator_disable(data->vbus_boost); + + if (ret == 0) + data->vbus_boost_enabled = enable; + else + dev_err(data->dev, "Error updating Vbus boost regulator: %d\n", ret); +} + static void int3496_do_usb_id(struct work_struct *work) { struct int3496_data *data = @@ -71,6 +95,8 @@ static void int3496_do_usb_id(struct work_struct *work) if (!IS_ERR(data->gpio_vbus_en)) gpiod_direction_output(data->gpio_vbus_en, !id); + else + int3496_set_vbus_boost(data, !id); extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id); } @@ -91,10 +117,12 @@ static int int3496_probe(struct platform_device *pdev) struct int3496_data *data; int ret; - ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios); - if (ret) { - 
dev_err(dev, "can't add GPIO ACPI mapping\n"); - return ret; + if (has_acpi_companion(dev)) { + ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios); + if (ret) { + dev_err(dev, "can't add GPIO ACPI mapping\n"); + return ret; + } } data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); @@ -106,7 +134,8 @@ static int int3496_probe(struct platform_device *pdev) if (ret) return ret; - data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN); + data->gpio_usb_id = + devm_gpiod_get(dev, "id", GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE); if (IS_ERR(data->gpio_usb_id)) { ret = PTR_ERR(data->gpio_usb_id); dev_err(dev, "can't request USB ID GPIO: %d\n", ret); @@ -120,12 +149,14 @@ static int int3496_probe(struct platform_device *pdev) } data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS); - if (IS_ERR(data->gpio_vbus_en)) - dev_info(dev, "can't request VBUS EN GPIO\n"); + if (IS_ERR(data->gpio_vbus_en)) { + dev_dbg(dev, "can't request VBUS EN GPIO\n"); + data->vbus_boost = devm_regulator_get_optional(dev, "vbus"); + } data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS); if (IS_ERR(data->gpio_usb_mux)) - dev_info(dev, "can't request USB MUX GPIO\n"); + dev_dbg(dev, "can't request USB MUX GPIO\n"); /* register extcon device */ data->edev = devm_extcon_dev_allocate(dev, int3496_cable); @@ -164,12 +195,19 @@ static const struct acpi_device_id int3496_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, int3496_acpi_match); +static const struct platform_device_id int3496_ids[] = { + { .name = "intel-int3496" }, + {}, +}; +MODULE_DEVICE_TABLE(platform, int3496_ids); + static struct platform_driver int3496_driver = { .driver = { .name = "intel-int3496", .acpi_match_table = int3496_acpi_match, }, .probe = int3496_probe, + .id_table = int3496_ids, }; module_platform_driver(int3496_driver); diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c index 5b9a3cf8df26..017a07197f38 100644 --- a/drivers/extcon/extcon-ptn5150.c +++ b/drivers/extcon/extcon-ptn5150.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/extcon-provider.h> #include <linux/gpio/consumer.h> +#include <linux/usb/role.h> /* PTN5150 registers */ #define PTN5150_REG_DEVICE_ID 0x01 @@ -52,6 +53,7 @@ struct ptn5150_info { int irq; struct work_struct irq_work; struct mutex mutex; + struct usb_role_switch *role_sw; }; /* List of detectable cables */ @@ -70,6 +72,7 @@ static const struct regmap_config ptn5150_regmap_config = { static void ptn5150_check_state(struct ptn5150_info *info) { unsigned int port_status, reg_data, vbus; + enum usb_role usb_role = USB_ROLE_NONE; int ret; ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, ®_data); @@ -85,6 +88,7 @@ static void ptn5150_check_state(struct ptn5150_info *info) extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false); gpiod_set_value_cansleep(info->vbus_gpiod, 0); extcon_set_state_sync(info->edev, EXTCON_USB, true); + usb_role = USB_ROLE_DEVICE; break; case PTN5150_UFP_ATTACHED: extcon_set_state_sync(info->edev, EXTCON_USB, false); @@ -95,10 +99,18 @@ static void ptn5150_check_state(struct ptn5150_info *info) gpiod_set_value_cansleep(info->vbus_gpiod, 1); extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true); + usb_role = USB_ROLE_HOST; break; default: break; } + + if (usb_role) { + ret = usb_role_switch_set_role(info->role_sw, usb_role); + if (ret) + dev_err(info->dev, "failed to set %s role: %d\n", + usb_role_string(usb_role), ret); + } } static void ptn5150_irq_work(struct work_struct *work) @@ -133,6 +145,13 @@ static void 
ptn5150_irq_work(struct work_struct *work) extcon_set_state_sync(info->edev, EXTCON_USB, false); gpiod_set_value_cansleep(info->vbus_gpiod, 0); + + ret = usb_role_switch_set_role(info->role_sw, + USB_ROLE_NONE); + if (ret) + dev_err(info->dev, + "failed to set none role: %d\n", + ret); } } @@ -194,6 +213,14 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info) return 0; } +static void ptn5150_work_sync_and_put(void *data) +{ + struct ptn5150_info *info = data; + + cancel_work_sync(&info->irq_work); + usb_role_switch_put(info->role_sw); +} + static int ptn5150_i2c_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; @@ -284,6 +311,15 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c) if (ret) return -EINVAL; + info->role_sw = usb_role_switch_get(info->dev); + if (IS_ERR(info->role_sw)) + return dev_err_probe(info->dev, PTR_ERR(info->role_sw), + "failed to get role switch\n"); + + ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info); + if (ret) + return ret; + /* * Update current extcon state if for example OTG connection was there * before the probe diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c index 93da2d8379b1..f706f5288257 100644 --- a/drivers/extcon/extcon-sm5502.c +++ b/drivers/extcon/extcon-sm5502.c @@ -798,6 +798,7 @@ static const struct sm5502_type sm5504_data = { static const struct of_device_id sm5502_dt_match[] = { { .compatible = "siliconmitus,sm5502-muic", .data = &sm5502_data }, { .compatible = "siliconmitus,sm5504-muic", .data = &sm5504_data }, + { .compatible = "siliconmitus,sm5703-muic", .data = &sm5502_data }, { }, }; MODULE_DEVICE_TABLE(of, sm5502_dt_match); @@ -830,6 +831,7 @@ static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops, static const struct i2c_device_id sm5502_i2c_id[] = { { "sm5502", (kernel_ulong_t)&sm5502_data }, { "sm5504", (kernel_ulong_t)&sm5504_data }, + { "sm5703-muic", (kernel_ulong_t)&sm5502_data }, { } }; MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id); diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c index f2b65d967384..40d967a11e87 100644 --- a/drivers/extcon/extcon-usb-gpio.c +++ b/drivers/extcon/extcon-usb-gpio.c @@ -226,16 +226,6 @@ static int usb_extcon_suspend(struct device *dev) } } - /* - * We don't want to process any IRQs after this point - * as GPIOs used behind I2C subsystem might not be - * accessible until resume completes. So disable IRQ. 
- */ - if (info->id_gpiod) - disable_irq(info->id_irq); - if (info->vbus_gpiod) - disable_irq(info->vbus_irq); - if (!device_may_wakeup(dev)) pinctrl_pm_select_sleep_state(dev); @@ -267,11 +257,6 @@ static int usb_extcon_resume(struct device *dev) } } - if (info->id_gpiod) - enable_irq(info->id_irq); - if (info->vbus_gpiod) - enable_irq(info->vbus_irq); - queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, 0); diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c index 5290cc2d19d9..fde1db62be0d 100644 --- a/drivers/extcon/extcon-usbc-cros-ec.c +++ b/drivers/extcon/extcon-usbc-cros-ec.c @@ -68,7 +68,7 @@ static int cros_ec_pd_command(struct cros_ec_extcon_info *info, struct cros_ec_command *msg; int ret; - msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL); + msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL); if (!msg) return -ENOMEM; diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index a09e704fd0fa..d3a32b806499 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -399,6 +399,7 @@ static ssize_t cable_state_show(struct device *dev, /** * extcon_sync() - Synchronize the state for an external connector. * @edev: the extcon device + * @id: the unique id indicating an external connector * * Note that this function send a notification in order to synchronize * the state and property of an external connector. @@ -736,6 +737,9 @@ EXPORT_SYMBOL_GPL(extcon_set_property); /** * extcon_set_property_sync() - Set property of an external connector with sync. + * @edev: the extcon device + * @id: the unique id indicating an external connector + * @prop: the property id indicating an extcon property * @prop_val: the pointer including the new value of extcon property * * Note that when setting the property value of external connector, @@ -851,6 +855,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability); * @extcon_name: the extcon name provided with extcon_dev_register() * * Return the pointer of extcon device if success or ERR_PTR(err) if fail. + * NOTE: This function returns -EPROBE_DEFER so it may only be called from + * probe() functions. 
*/ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { @@ -864,7 +870,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) if (!strcmp(sd->name, extcon_name)) goto out; } - sd = NULL; + sd = ERR_PTR(-EPROBE_DEFER); out: mutex_unlock(&extcon_dev_list_lock); return sd; @@ -1218,19 +1224,14 @@ int extcon_dev_register(struct extcon_dev *edev) edev->dev.type = &edev->extcon_dev_type; } - ret = device_register(&edev->dev); - if (ret) { - put_device(&edev->dev); - goto err_dev; - } - spin_lock_init(&edev->lock); - edev->nh = devm_kcalloc(&edev->dev, edev->max_supported, - sizeof(*edev->nh), GFP_KERNEL); - if (!edev->nh) { - ret = -ENOMEM; - device_unregister(&edev->dev); - goto err_dev; + if (edev->max_supported) { + edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh), + GFP_KERNEL); + if (!edev->nh) { + ret = -ENOMEM; + goto err_alloc_nh; + } } for (index = 0; index < edev->max_supported; index++) @@ -1241,6 +1242,12 @@ int extcon_dev_register(struct extcon_dev *edev) dev_set_drvdata(&edev->dev, edev); edev->state = 0; + ret = device_register(&edev->dev); + if (ret) { + put_device(&edev->dev); + goto err_dev; + } + mutex_lock(&extcon_dev_list_lock); list_add(&edev->entry, &extcon_dev_list); mutex_unlock(&extcon_dev_list_lock); @@ -1249,6 +1256,9 @@ int extcon_dev_register(struct extcon_dev *edev) err_dev: if (edev->max_supported) + kfree(edev->nh); +err_alloc_nh: + if (edev->max_supported) kfree(edev->extcon_dev_type.groups); err_alloc_groups: if (edev->max_supported && edev->mutually_exclusive) { @@ -1308,6 +1318,7 @@ void extcon_dev_unregister(struct extcon_dev *edev) if (edev->max_supported) { kfree(edev->extcon_dev_type.groups); kfree(edev->cables); + kfree(edev->nh); } put_device(&edev->dev); diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index 3a353776bd34..66727ad3361b 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -604,7 +604,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh, "%d-%d", dh->type, entry->instance); if (*ret) { - kfree(entry); + kobject_put(&entry->kobj); return; } diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c index 69353dd0ea22..5cc238916551 100644 --- a/drivers/firmware/edd.c +++ b/drivers/firmware/edd.c @@ -685,8 +685,7 @@ static void edd_populate_dir(struct edd_device * edev) int i; for (i = 0; (attr = edd_attrs[i]) && !error; i++) { - if (!attr->test || - (attr->test && attr->test(edev))) + if (!attr->test || attr->test(edev)) error = sysfs_create_file(&edev->kobj,&attr->attr); } diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 8177a0fae11d..14663f671323 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -948,17 +948,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; - size_t size = 0; list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { - size = pmem->size; - break; + gen_pool_free(chan->ctrl->genpool, + (unsigned long)kaddr, pmem->size); + pmem->vaddr = NULL; + list_del(&pmem->node); + return; } - gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size); - pmem->vaddr = NULL; - list_del(&pmem->node); + list_del(&svc_data_mem); } EXPORT_SYMBOL_GPL(stratix10_svc_free_memory); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index f21ece56695e..7977a494a651 100644 --- 
a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -36,8 +36,16 @@ /* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */ #define CRL_APB_BOOTPIN_CTRL_MASK 0xF0FU +/* IOCTL/QUERY feature payload size */ +#define FEATURE_PAYLOAD_SIZE 2 + +/* Firmware feature check version mask */ +#define FIRMWARE_VERSION_MASK GENMASK(15, 0) + static bool feature_check_enabled; static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); +static u32 ioctl_features[FEATURE_PAYLOAD_SIZE]; +static u32 query_features[FEATURE_PAYLOAD_SIZE]; static struct platform_device *em_dev; @@ -167,21 +175,28 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2, return zynqmp_pm_ret_code((enum pm_ret_status)res.a0); } -/** - * zynqmp_pm_feature() - Check weather given feature is supported or not - * @api_id: API ID to check - * - * Return: Returns status, either success or error+reason - */ -int zynqmp_pm_feature(const u32 api_id) +static int __do_feature_check_call(const u32 api_id, u32 *ret_payload) { int ret; - u32 ret_payload[PAYLOAD_ARG_CNT]; u64 smc_arg[2]; - struct pm_api_feature_data *feature_data; - if (!feature_check_enabled) - return 0; + smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK; + smc_arg[1] = api_id; + + ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload); + if (ret) + ret = -EOPNOTSUPP; + else + ret = ret_payload[1]; + + return ret; +} + +static int do_feature_check_call(const u32 api_id) +{ + int ret; + u32 ret_payload[PAYLOAD_ARG_CNT]; + struct pm_api_feature_data *feature_data; /* Check for existing entry in hash table for given api */ hash_for_each_possible(pm_api_features_map, feature_data, hentry, @@ -196,23 +211,86 @@ int zynqmp_pm_feature(const u32 api_id) return -ENOMEM; feature_data->pm_api_id = api_id; - smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK; - smc_arg[1] = api_id; - - ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload); - if (ret) - ret = -EOPNOTSUPP; - else - ret = ret_payload[1]; + ret = __do_feature_check_call(api_id, ret_payload); feature_data->feature_status = ret; hash_add(pm_api_features_map, &feature_data->hentry, api_id); + if (api_id == PM_IOCTL) + /* Store supported IOCTL IDs mask */ + memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4); + else if (api_id == PM_QUERY_DATA) + /* Store supported QUERY IDs mask */ + memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4); + return ret; } EXPORT_SYMBOL_GPL(zynqmp_pm_feature); /** + * zynqmp_pm_feature() - Check whether given feature is supported or not and + * store supported IOCTL/QUERY ID mask + * @api_id: API ID to check + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_feature(const u32 api_id) +{ + int ret; + + if (!feature_check_enabled) + return 0; + + ret = do_feature_check_call(api_id); + + return ret; +} + +/** + * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function + * is supported or not + * @api_id: PM_IOCTL or PM_QUERY_DATA + * @id: IOCTL or QUERY function IDs + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id) +{ + int ret; + u32 *bit_mask; + + /* Input arguments validation */ + if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA)) + return -EINVAL; + + /* Check feature check API version */ + ret = do_feature_check_call(PM_FEATURE_CHECK); + if (ret < 0) + return ret; + + /* Check if feature check version 2 is supported or not */ + if ((ret & FIRMWARE_VERSION_MASK) == 
PM_API_VERSION_2) { + /* + * Call feature check for IOCTL/QUERY API to get IOCTL ID or + * QUERY ID feature status. + */ + ret = do_feature_check_call(api_id); + if (ret < 0) + return ret; + + bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features; + + if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U) + return -EOPNOTSUPP; + } else { + return -ENODATA; + } + + return 0; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported); + +/** * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer * caller function depending on the configuration * @pm_api_id: Requested PM-API call @@ -1584,6 +1662,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) struct zynqmp_devinfo *devinfo; int ret; + ret = get_set_conduit_method(dev->of_node); + if (ret) + return ret; + np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp"); if (!np) { np = of_find_compatible_node(NULL, NULL, "xlnx,versal"); @@ -1592,11 +1674,14 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) feature_check_enabled = true; } - of_node_put(np); - ret = get_set_conduit_method(dev->of_node); - if (ret) - return ret; + if (!feature_check_enabled) { + ret = do_feature_check_call(PM_FEATURE_CHECK); + if (ret >= 0) + feature_check_enabled = true; + } + + of_node_put(np); devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL); if (!devinfo) diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 0bff783d1b61..5935b3d0abd5 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -18,9 +18,9 @@ obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o -obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o -obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o -obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o +obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o +obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o +obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o # FPGA Bridge Drivers obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 717ac9715970..fd1fa55c9113 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -259,6 +259,15 @@ static int find_dfls_by_default(struct pci_dev *pcidev, */ bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v); offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v); + if (bar == FME_PORT_OFST_BAR_SKIP) { + continue; + } else if (bar >= PCI_STD_NUM_BARS) { + dev_err(&pcidev->dev, "bad BAR %d for port %d\n", + bar, i); + ret = -EINVAL; + break; + } + start = pci_resource_start(pcidev, bar) + offset; len = pci_resource_len(pcidev, bar) - offset; diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 599bb21d86af..6bff39ff21a0 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -940,9 +940,12 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo, { void __iomem *base = binfo->ioaddr + ofst; unsigned int i, ibase, inr = 0; + enum dfl_id_type type; int virq; u64 v; + type = feature_dev_id_type(binfo->feature_dev); + /* * Ideally DFL framework should only read info from DFL header, but * current version DFL only provides mmio resources information for @@ -957,22 +960,25 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo, * code will be added. But in order to be compatible to old version * DFL, the driver may still fall back to these quirks. 
*/ - switch (fid) { - case PORT_FEATURE_ID_UINT: - v = readq(base + PORT_UINT_CAP); - ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v); - inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v); - break; - case PORT_FEATURE_ID_ERROR: - v = readq(base + PORT_ERROR_CAP); - ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v); - inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v); - break; - case FME_FEATURE_ID_GLOBAL_ERR: - v = readq(base + FME_ERROR_CAP); - ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v); - inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v); - break; + if (type == PORT_ID) { + switch (fid) { + case PORT_FEATURE_ID_UINT: + v = readq(base + PORT_UINT_CAP); + ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v); + inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v); + break; + case PORT_FEATURE_ID_ERROR: + v = readq(base + PORT_ERROR_CAP); + ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v); + inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v); + break; + } + } else if (type == FME_ID) { + if (fid == FME_FEATURE_ID_GLOBAL_ERR) { + v = readq(base + FME_ERROR_CAP); + ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v); + inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v); + } } if (!inr) { diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index 53572c7aced0..06cfcd5e84bb 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -89,6 +89,7 @@ #define FME_HDR_NEXT_AFU NEXT_AFU #define FME_HDR_CAP 0x30 #define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8)) +#define FME_PORT_OFST_BAR_SKIP 7 #define FME_HDR_BITSTREAM_ID 0x60 #define FME_HDR_BITSTREAM_MD 0x68 diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index d49a9ce34568..a3595ecc3f79 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -148,11 +148,12 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr, int ret; mgr->state = FPGA_MGR_STATE_WRITE_INIT; - if (!mgr->mops->initial_header_size) + if (!mgr->mops->initial_header_size) { ret = fpga_mgr_write_init(mgr, info, NULL, 0); - else - ret = fpga_mgr_write_init( - mgr, info, buf, min(mgr->mops->initial_header_size, count)); + } else { + count = min(mgr->mops->initial_header_size, count); + ret = fpga_mgr_write_init(mgr, info, buf, count); + } if (ret) { dev_err(&mgr->dev, "Error preparing FPGA for writing\n"); @@ -730,6 +731,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) * @parent: fpga manager device from pdev * @info: parameters for fpga manager * + * Return: fpga manager pointer on success, negative error code otherwise. + * * This is the devres variant of fpga_mgr_register_full() for which the unregister * function will be called automatically when the managing device is detached. */ @@ -763,6 +766,8 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data * + * Return: fpga manager pointer on success, negative error code otherwise. + * * This is the devres variant of fpga_mgr_register() for which the * unregister function will be called automatically when the managing * device is detached. 
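The MHI hunks above revolve around one API change: mhi_write_reg_field() goes from void to int __must_check, so a failed register read aborts the MMIO programming sequence instead of being silently dropped. The helper itself is a plain read-modify-write; below is a minimal standalone sketch of that shape, where read_reg()/write_reg() stand in for the driver's real accessors and the all-ones check is merely one plausible way a controller might flag a dead link.

#include <linux/bitops.h>	/* __ffs() */
#include <linux/io.h>
#include <linux/kernel.h>	/* U32_MAX */

/* Stand-in accessors, not kernel APIs; real drivers supply their own. */
static int read_reg(void __iomem *base, u32 offset, u32 *out)
{
	u32 v = readl(base + offset);

	if (v == U32_MAX)	/* all-ones often means a dead PCI link */
		return -EIO;
	*out = v;
	return 0;
}

static void write_reg(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

/* Read-modify-write of one register field, propagating read failures. */
static int __must_check write_reg_field(void __iomem *base, u32 offset,
					u32 mask, u32 val)
{
	u32 tmp;
	int ret;

	ret = read_reg(base, offset, &tmp);
	if (ret)
		return ret;		/* previously ignored; now fatal */

	tmp &= ~mask;			/* clear the field */
	tmp |= val << __ffs(mask);	/* shift the new value into place */
	write_reg(base, offset, tmp);

	return 0;
}

With __must_check on the prototype, callers such as the MHICFG writes in mhi_init_mmio() can no longer discard the result, which is exactly what the error paths added above do with it.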
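The misc_open() and xillybus rewrites above follow the same tree-wide hardening pattern: once list_for_each_entry() runs off the end of a list, the iterator no longer points at a real element, so it must not double as an implicit "found" flag. The fix is a dedicated iterator plus a result pointer that is set only on a match. A minimal sketch, with struct item and find_item() invented for illustration:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	int minor;
	struct list_head list;
};

static struct item *find_item(struct list_head *head, int minor)
{
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, head, list) {
		if (iter->minor != minor)
			continue;
		found = iter;	/* publish only on a genuine match */
		break;
	}

	/*
	 * If the loop completed, 'iter' now aliases the list head, which
	 * is not embedded in a struct item - never dereference it here.
	 */
	return found;
}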
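The extcon core change above flips the contract of extcon_get_extcon_dev(): a provider that has not (yet) registered now comes back as ERR_PTR(-EPROBE_DEFER) rather than NULL, and callers such as extcon-axp288 switch from NULL checks to IS_ERR(). A consumer's probe() then looks roughly like this sketch (the extcon name is made up):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	/* Never NULL any more: an absent provider is an ERR_PTR. */
	edev = extcon_get_extcon_dev("usb-id-detect");	/* invented name */
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* -EPROBE_DEFER retries probe later */

	/* ... register extcon notifiers on edev ... */
	return 0;
}

Since -EPROBE_DEFER is only meaningful when returned from probe(), the new kernel-doc NOTE above restricts the function to probe() context.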
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index b0ac18de4885..485948e3c0db 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -18,9 +18,9 @@ static DEFINE_IDA(fpga_region_ida); static struct class *fpga_region_class; -struct fpga_region *fpga_region_class_find( - struct device *start, const void *data, - int (*match)(struct device *, const void *)) +struct fpga_region * +fpga_region_class_find(struct device *start, const void *data, + int (*match)(struct device *, const void *)) { struct device *dev; diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 50b83057c048..ae82532fc127 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -28,7 +28,7 @@ MODULE_DEVICE_TABLE(of, fpga_region_of_match); * * Caller will need to put_device(®ion->dev) when done. * - * Returns FPGA Region struct or NULL + * Return: FPGA Region struct or NULL */ static struct fpga_region *of_fpga_region_find(struct device_node *np) { @@ -80,7 +80,7 @@ static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np) * Caller should call fpga_bridges_put(®ion->bridge_list) when * done with the bridges. * - * Return 0 for success (even if there are no bridges specified) + * Return: 0 for success (even if there are no bridges specified) * or -EBUSY if any of the bridges are in use. */ static int of_fpga_region_get_bridges(struct fpga_region *region) @@ -139,13 +139,13 @@ static int of_fpga_region_get_bridges(struct fpga_region *region) } /** - * child_regions_with_firmware + * child_regions_with_firmware - Used to check the child region info. * @overlay: device node of the overlay * * If the overlay adds child FPGA regions, they are not allowed to have * firmware-name property. * - * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name. + * Return: 0 for OK or -EINVAL if child FPGA region adds firmware-name. */ static int child_regions_with_firmware(struct device_node *overlay) { @@ -184,14 +184,14 @@ static int child_regions_with_firmware(struct device_node *overlay) * Given an overlay applied to an FPGA region, parse the FPGA image specific * info in the overlay and do some checking. * - * Returns: + * Return: * NULL if overlay doesn't direct us to program the FPGA. * fpga_image_info struct if there is an image to program. * error code for invalid overlay. */ -static struct fpga_image_info *of_fpga_region_parse_ov( - struct fpga_region *region, - struct device_node *overlay) +static struct fpga_image_info * +of_fpga_region_parse_ov(struct fpga_region *region, + struct device_node *overlay) { struct device *dev = ®ion->dev; struct fpga_image_info *info; @@ -279,7 +279,7 @@ ret_no_info: * If the checks fail, overlay is rejected and does not get added to the * live tree. * - * Returns 0 for success or negative error code for failure. + * Return: 0 for success or negative error code for failure. */ static int of_fpga_region_notify_pre_apply(struct fpga_region *region, struct of_overlay_notify_data *nd) @@ -339,7 +339,7 @@ static void of_fpga_region_notify_post_remove(struct fpga_region *region, * This notifier handles programming an FPGA when a "firmware-name" property is * added to an fpga-region. * - * Returns NOTIFY_OK or error if FPGA programming fails. + * Return: NOTIFY_OK or error if FPGA programming fails. 
*/ static int of_fpga_region_notify(struct notifier_block *nb, unsigned long action, void *arg) @@ -446,6 +446,8 @@ static struct platform_driver of_fpga_region_driver = { /** * of_fpga_region_init - init function for fpga_region class * Creates the fpga_region class and registers a reconfig notifier. + * + * Return: 0 on success, negative error code otherwise. */ static int __init of_fpga_region_init(void) { diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index af00dca8d1ac..ee6ce92ab4c3 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1379,7 +1379,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev) continue; conn->child_dev = coresight_find_csdev_by_fwnode(conn->child_fwnode); - if (conn->child_dev) { + if (conn->child_dev && conn->child_dev->has_conns_grp) { ret = coresight_make_links(csdev, conn, conn->child_dev); if (ret) @@ -1571,6 +1571,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) int nr_refcnts = 1; atomic_t *refcnts = NULL; struct coresight_device *csdev; + bool registered = false; csdev = kzalloc(sizeof(*csdev), GFP_KERNEL); if (!csdev) { @@ -1591,7 +1592,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL); if (!refcnts) { ret = -ENOMEM; - goto err_free_csdev; + kfree(csdev); + goto err_out; } csdev->refcnt = refcnts; @@ -1616,6 +1618,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev)); dev_set_name(&csdev->dev, "%s", desc->name); + /* + * Make sure the device registration and the connection fixup + * are synchronised, so that we don't see uninitialised devices + * on the coresight bus while trying to resolve the connections. + */ + mutex_lock(&coresight_mutex); + ret = device_register(&csdev->dev); if (ret) { put_device(&csdev->dev); @@ -1623,7 +1632,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) * All resources are free'd explicitly via * coresight_device_release(), triggered from put_device(). */ - goto err_out; + goto out_unlock; } if (csdev->type == CORESIGHT_DEV_TYPE_SINK || @@ -1638,11 +1647,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) * from put_device(), which is in turn called from * function device_unregister(). 
*/ - goto err_out; + goto out_unlock; } } - - mutex_lock(&coresight_mutex); + /* Device is now registered */ + registered = true; ret = coresight_create_conns_sysfs_group(csdev); if (!ret) @@ -1652,16 +1661,18 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) if (!ret && cti_assoc_ops && cti_assoc_ops->add) cti_assoc_ops->add(csdev); +out_unlock: mutex_unlock(&coresight_mutex); - if (ret) { + /* Success */ + if (!ret) + return csdev; + + /* Unregister the device if needed */ + if (registered) { coresight_unregister(csdev); return ERR_PTR(ret); } - return csdev; - -err_free_csdev: - kfree(csdev); err_out: /* Cleanup the connection information */ coresight_release_platform_data(NULL, desc->pdata); diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 8845ec4b4402..1874df7c6a73 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -380,9 +380,10 @@ static int debug_notifier_call(struct notifier_block *self, int cpu; struct debug_drvdata *drvdata; - mutex_lock(&debug_lock); + /* Bail out if we can't acquire the mutex or the functionality is off */ + if (!mutex_trylock(&debug_lock)) + return NOTIFY_DONE; - /* Bail out if the functionality is disabled */ if (!debug_enable) goto skip_dump; @@ -401,7 +402,7 @@ static int debug_notifier_call(struct notifier_block *self, skip_dump: mutex_unlock(&debug_lock); - return 0; + return NOTIFY_DONE; } static struct notifier_block debug_notifier = { diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c index 7d413ba8b823..d0ab9933472b 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c @@ -204,7 +204,7 @@ void etm_set_default(struct etm_config *config) * set all bits in register 0x007, the ETMTECR2, to 0 * set register 0x008, the ETMTEEVR, to 0x6F (TRUE). 
*/ - config->enable_ctrl1 = BIT(24); + config->enable_ctrl1 = ETMTECR1_INC_EXC; config->enable_ctrl2 = 0x0; config->enable_event = ETM_HARD_WIRE_RES_A; diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c index e8c7649f123e..68fcbf4ce7a8 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c @@ -474,7 +474,7 @@ static ssize_t addr_start_store(struct device *dev, config->addr_val[idx] = val; config->addr_type[idx] = ETM_ADDR_TYPE_START; config->startstop_ctrl |= (1 << idx); - config->enable_ctrl1 |= BIT(25); + config->enable_ctrl1 |= ETMTECR1_START_STOP; spin_unlock(&drvdata->spinlock); return size; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 7f416a12000e..87299e99dabb 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -443,7 +443,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) for (i = 0; i < drvdata->nr_ss_cmp; i++) { /* always clear status bit on restart if using single-shot */ if (config->ss_ctrl[i] || config->ss_pe_cmp[i]) - config->ss_status[i] &= ~BIT(31); + config->ss_status[i] &= ~TRCSSCSRn_STATUS; etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i)); etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i)); if (etm4x_sspcicrn_present(drvdata, i)) @@ -633,7 +633,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev, /* Go from generic option to ETMv4 specifics */ if (attr->config & BIT(ETM_OPT_CYCACC)) { - config->cfg |= BIT(4); + config->cfg |= TRCCONFIGR_CCI; /* TRM: Must program this for cycacc to work */ config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT; } @@ -653,14 +653,14 @@ static int etm4_parse_event_config(struct coresight_device *csdev, goto out; /* bit[11], Global timestamp tracing bit */ - config->cfg |= BIT(11); + config->cfg |= TRCCONFIGR_TS; } /* Only trace contextID when runs in root PID namespace */ if ((attr->config & BIT(ETM_OPT_CTXTID)) && task_is_in_init_pid_ns(current)) /* bit[6], Context ID tracing bit */ - config->cfg |= BIT(ETM4_CFG_BIT_CTXTID); + config->cfg |= TRCCONFIGR_CID; /* * If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID @@ -672,17 +672,15 @@ static int etm4_parse_event_config(struct coresight_device *csdev, ret = -EINVAL; goto out; } - /* Only trace virtual contextID when runs in root PID namespace */ if (task_is_in_init_pid_ns(current)) - config->cfg |= BIT(ETM4_CFG_BIT_VMID) | - BIT(ETM4_CFG_BIT_VMID_OPT); + config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT; } /* return stack - enable if selected and supported */ if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack) /* bit[12], Return stack enable bit */ - config->cfg |= BIT(12); + config->cfg |= TRCCONFIGR_RS; /* * Set any selected configuration and preset. 
@@ -1097,107 +1095,67 @@ static void etm4_init_arch_data(void *info) etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0); /* INSTP0, bits[2:1] P0 tracing support field */ - if (BMVAL(etmidr0, 1, 2) == 0b11) - drvdata->instrp0 = true; - else - drvdata->instrp0 = false; - + drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11); /* TRCBB, bit[5] Branch broadcast tracing support bit */ - if (BMVAL(etmidr0, 5, 5)) - drvdata->trcbb = true; - else - drvdata->trcbb = false; - + drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB); /* TRCCOND, bit[6] Conditional instruction tracing support bit */ - if (BMVAL(etmidr0, 6, 6)) - drvdata->trccond = true; - else - drvdata->trccond = false; - + drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND); /* TRCCCI, bit[7] Cycle counting instruction bit */ - if (BMVAL(etmidr0, 7, 7)) - drvdata->trccci = true; - else - drvdata->trccci = false; - + drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI); /* RETSTACK, bit[9] Return stack bit */ - if (BMVAL(etmidr0, 9, 9)) - drvdata->retstack = true; - else - drvdata->retstack = false; - + drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK); /* NUMEVENT, bits[11:10] Number of events field */ - drvdata->nr_event = BMVAL(etmidr0, 10, 11); + drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0); /* QSUPP, bits[16:15] Q element support field */ - drvdata->q_support = BMVAL(etmidr0, 15, 16); + drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0); /* TSSIZE, bits[28:24] Global timestamp size field */ - drvdata->ts_size = BMVAL(etmidr0, 24, 28); + drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0); /* maximum size of resources */ etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2); /* CIDSIZE, bits[9:5] Indicates the Context ID size */ - drvdata->ctxid_size = BMVAL(etmidr2, 5, 9); + drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2); /* VMIDSIZE, bits[14:10] Indicates the VMID size */ - drvdata->vmid_size = BMVAL(etmidr2, 10, 14); + drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2); /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */ - drvdata->ccsize = BMVAL(etmidr2, 25, 28); + drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2); etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3); /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */ - drvdata->ccitmin = BMVAL(etmidr3, 0, 11); + drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3); /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */ - drvdata->s_ex_level = BMVAL(etmidr3, 16, 19); + drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3); drvdata->config.s_ex_level = drvdata->s_ex_level; /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */ - drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23); - + drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3); /* * TRCERR, bit[24] whether a trace unit can trace a * system error exception. */ - if (BMVAL(etmidr3, 24, 24)) - drvdata->trc_error = true; - else - drvdata->trc_error = false; - + drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR); /* SYNCPR, bit[25] implementation has a fixed synchronization period? */ - if (BMVAL(etmidr3, 25, 25)) - drvdata->syncpr = true; - else - drvdata->syncpr = false; - + drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR); /* STALLCTL, bit[26] is stall control implemented? */ - if (BMVAL(etmidr3, 26, 26)) - drvdata->stallctl = true; - else - drvdata->stallctl = false; - + drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL); /* SYSSTALL, bit[27] implementation can support stall control? 
*/ - if (BMVAL(etmidr3, 27, 27)) - drvdata->sysstall = true; - else - drvdata->sysstall = false; - + drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL); /* * NUMPROC - the number of PEs available for tracing, 5bits * = TRCIDR3.bits[13:12]bits[30:28] * bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0) * bits[3:0] = TRCIDR3.bits[30:28] */ - drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30); - + drvdata->nr_pe = (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) | + FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3); /* NOOVERFLOW, bit[31] is trace overflow prevention supported */ - if (BMVAL(etmidr3, 31, 31)) - drvdata->nooverflow = true; - else - drvdata->nooverflow = false; + drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW); /* number of resources trace unit supports */ etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4); /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */ - drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3); + drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4); /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */ - drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15); + drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4); /* * NUMRSPAIR, bits[19:16] * The number of resource pairs conveyed by the HW starts at 0, i.e a @@ -1208,7 +1166,7 @@ static void etm4_init_arch_data(void *info) * the default TRUE and FALSE resource selectors are omitted. * Otherwise for values 0x1 and above the number is N + 1 as per v4.2. */ - drvdata->nr_resource = BMVAL(etmidr4, 16, 19); + drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4); if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0)) drvdata->nr_resource += 1; /* @@ -1216,45 +1174,39 @@ static void etm4_init_arch_data(void *info) * comparator control for tracing. Read any status regs as these * also contain RO capability data. */ - drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23); + drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4); for (i = 0; i < drvdata->nr_ss_cmp; i++) { drvdata->config.ss_status[i] = etm4x_relaxed_read32(csa, TRCSSCSRn(i)); } /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */ - drvdata->numcidc = BMVAL(etmidr4, 24, 27); + drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4); /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */ - drvdata->numvmidc = BMVAL(etmidr4, 28, 31); + drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4); etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5); /* NUMEXTIN, bits[8:0] number of external inputs implemented */ - drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8); + drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5); /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */ - drvdata->trcid_size = BMVAL(etmidr5, 16, 21); + drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5); /* ATBTRIG, bit[22] implementation can support ATB triggers? 
*/ - if (BMVAL(etmidr5, 22, 22)) - drvdata->atbtrig = true; - else - drvdata->atbtrig = false; + drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG); /* * LPOVERRIDE, bit[23] implementation supports * low-power state override */ - if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up)) - drvdata->lpoverride = true; - else - drvdata->lpoverride = false; + drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up); /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */ - drvdata->nrseqstate = BMVAL(etmidr5, 25, 27); + drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5); /* NUMCNTR, bits[30:28] number of counters available for tracing */ - drvdata->nr_cntr = BMVAL(etmidr5, 28, 30); + drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5); etm4_cs_lock(drvdata, csa); cpu_detect_trace_filtering(drvdata); } static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config) { - return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT; + return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK); } /* Set ELx trace filter access in the TRCVICTLR register */ @@ -1280,7 +1232,7 @@ static void etm4_set_default_config(struct etmv4_config *config) config->ts_ctrl = 0x0; /* TRCVICTLR::EVENT = 0x01, select the always on logic */ - config->vinst_ctrl = BIT(0); + config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01); /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */ etm4_set_victlr_access(config); @@ -1389,7 +1341,7 @@ static void etm4_set_default_filter(struct etmv4_config *config) * TRCVICTLR::SSSTATUS == 1, the start-stop logic is * in the started state */ - config->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= TRCVICTLR_SSSTATUS; config->mode |= ETM_MODE_VIEWINST_STARTSTOP; /* No start-stop filtering for ViewInst */ @@ -1493,7 +1445,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata, * TRCVICTLR::SSSTATUS == 1, the start-stop logic is * in the started state */ - config->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= TRCVICTLR_SSSTATUS; /* No start-stop filtering for ViewInst */ config->vissctlr = 0x0; @@ -1521,7 +1473,7 @@ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata, * etm4_disable_perf(). 
*/ if (filters->ssstatus) - config->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= TRCVICTLR_SSSTATUS; /* No include/exclude filtering for ViewInst */ config->viiectlr = 0x0; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 21687cc1e4e2..6ea8181816fc 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -22,7 +22,7 @@ static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) * TRCACATRn.TYPE bit[1:0]: type of comparison * the trace unit performs */ - if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { + if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) { if (idx % 2 != 0) return -EINVAL; @@ -180,12 +180,12 @@ static ssize_t reset_store(struct device *dev, /* Disable data tracing: do not trace load and store data transfers */ config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); - config->cfg &= ~(BIT(1) | BIT(2)); + config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE); /* Disable data value and data address tracing */ config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | ETM_MODE_DATA_TRACE_VAL); - config->cfg &= ~(BIT(16) | BIT(17)); + config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV); /* Disable all events tracing */ config->eventctrl0 = 0x0; @@ -206,11 +206,11 @@ static ssize_t reset_store(struct device *dev, * started state. ARM recommends start-stop logic is set before * each trace run. */ - config->vinst_ctrl = BIT(0); + config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01); if (drvdata->nr_addr_cmp > 0) { config->mode |= ETM_MODE_VIEWINST_STARTSTOP; /* SSSTATUS, bit[9] */ - config->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= TRCVICTLR_SSSTATUS; } /* No address range filtering for ViewInst */ @@ -304,134 +304,134 @@ static ssize_t mode_store(struct device *dev, if (drvdata->instrp0 == true) { /* start by clearing instruction P0 field */ - config->cfg &= ~(BIT(1) | BIT(2)); + config->cfg &= ~TRCCONFIGR_INSTP0_LOAD_STORE; if (config->mode & ETM_MODE_LOAD) /* 0b01 Trace load instructions as P0 instructions */ - config->cfg |= BIT(1); + config->cfg |= TRCCONFIGR_INSTP0_LOAD; if (config->mode & ETM_MODE_STORE) /* 0b10 Trace store instructions as P0 instructions */ - config->cfg |= BIT(2); + config->cfg |= TRCCONFIGR_INSTP0_STORE; if (config->mode & ETM_MODE_LOAD_STORE) /* * 0b11 Trace load and store instructions * as P0 instructions */ - config->cfg |= BIT(1) | BIT(2); + config->cfg |= TRCCONFIGR_INSTP0_LOAD_STORE; } /* bit[3], Branch broadcast mode */ if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) - config->cfg |= BIT(3); + config->cfg |= TRCCONFIGR_BB; else - config->cfg &= ~BIT(3); + config->cfg &= ~TRCCONFIGR_BB; /* bit[4], Cycle counting instruction trace bit */ if ((config->mode & ETMv4_MODE_CYCACC) && (drvdata->trccci == true)) - config->cfg |= BIT(4); + config->cfg |= TRCCONFIGR_CCI; else - config->cfg &= ~BIT(4); + config->cfg &= ~TRCCONFIGR_CCI; /* bit[6], Context ID tracing bit */ if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) - config->cfg |= BIT(6); + config->cfg |= TRCCONFIGR_CID; else - config->cfg &= ~BIT(6); + config->cfg &= ~TRCCONFIGR_CID; if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) - config->cfg |= BIT(7); + config->cfg |= TRCCONFIGR_VMID; else - config->cfg &= ~BIT(7); + config->cfg &= ~TRCCONFIGR_VMID; /* bits[10:8], Conditional instruction tracing bit */ mode = ETM_MODE_COND(config->mode); if 
(drvdata->trccond == true) { - config->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); - config->cfg |= mode << 8; + config->cfg &= ~TRCCONFIGR_COND_MASK; + config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK); } /* bit[11], Global timestamp tracing bit */ if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) - config->cfg |= BIT(11); + config->cfg |= TRCCONFIGR_TS; else - config->cfg &= ~BIT(11); + config->cfg &= ~TRCCONFIGR_TS; /* bit[12], Return stack enable bit */ if ((config->mode & ETM_MODE_RETURNSTACK) && (drvdata->retstack == true)) - config->cfg |= BIT(12); + config->cfg |= TRCCONFIGR_RS; else - config->cfg &= ~BIT(12); + config->cfg &= ~TRCCONFIGR_RS; /* bits[14:13], Q element enable field */ mode = ETM_MODE_QELEM(config->mode); /* start by clearing QE bits */ - config->cfg &= ~(BIT(13) | BIT(14)); + config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS); /* * if supported, Q elements with instruction counts are enabled. * Always set the low bit for any requested mode. Valid combos are * 0b00, 0b01 and 0b11. */ if (mode && drvdata->q_support) - config->cfg |= BIT(13); + config->cfg |= TRCCONFIGR_QE_W_COUNTS; /* * if supported, Q elements with and without instruction * counts are enabled */ if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) - config->cfg |= BIT(14); + config->cfg |= TRCCONFIGR_QE_WO_COUNTS; /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ if ((config->mode & ETM_MODE_ATB_TRIGGER) && (drvdata->atbtrig == true)) - config->eventctrl1 |= BIT(11); + config->eventctrl1 |= TRCEVENTCTL1R_ATB; else - config->eventctrl1 &= ~BIT(11); + config->eventctrl1 &= ~TRCEVENTCTL1R_ATB; /* bit[12], Low-power state behavior override bit */ if ((config->mode & ETM_MODE_LPOVERRIDE) && (drvdata->lpoverride == true)) - config->eventctrl1 |= BIT(12); + config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE; else - config->eventctrl1 &= ~BIT(12); + config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE; /* bit[8], Instruction stall bit */ if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true)) - config->stall_ctrl |= BIT(8); + config->stall_ctrl |= TRCSTALLCTLR_ISTALL; else - config->stall_ctrl &= ~BIT(8); + config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL; /* bit[10], Prioritize instruction trace bit */ if (config->mode & ETM_MODE_INSTPRIO) - config->stall_ctrl |= BIT(10); + config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY; else - config->stall_ctrl &= ~BIT(10); + config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY; /* bit[13], Trace overflow prevention bit */ if ((config->mode & ETM_MODE_NOOVERFLOW) && (drvdata->nooverflow == true)) - config->stall_ctrl |= BIT(13); + config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW; else - config->stall_ctrl &= ~BIT(13); + config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW; /* bit[9] Start/stop logic control bit */ if (config->mode & ETM_MODE_VIEWINST_STARTSTOP) - config->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= TRCVICTLR_SSSTATUS; else - config->vinst_ctrl &= ~BIT(9); + config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS; /* bit[10], Whether a trace unit must trace a Reset exception */ if (config->mode & ETM_MODE_TRACE_RESET) - config->vinst_ctrl |= BIT(10); + config->vinst_ctrl |= TRCVICTLR_TRCRESET; else - config->vinst_ctrl &= ~BIT(10); + config->vinst_ctrl &= ~TRCVICTLR_TRCRESET; /* bit[11], Whether a trace unit must trace a system error exception */ if ((config->mode & ETM_MODE_TRACE_ERR) && (drvdata->trc_error == true)) - config->vinst_ctrl |= BIT(11); + config->vinst_ctrl |= TRCVICTLR_TRCERR; else - config->vinst_ctrl &= ~BIT(11); + 
config->vinst_ctrl &= ~TRCVICTLR_TRCERR; if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) etm4_config_trace_mode(config); @@ -534,7 +534,7 @@ static ssize_t event_instren_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - val = BMVAL(config->eventctrl1, 0, 3); + val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -551,23 +551,28 @@ static ssize_t event_instren_store(struct device *dev, spin_lock(&drvdata->spinlock); /* start by clearing all instruction event enable bits */ - config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); + config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK; switch (drvdata->nr_event) { case 0x0: /* generate Event element for event 1 */ - config->eventctrl1 |= val & BIT(1); + config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1; break; case 0x1: /* generate Event element for event 1 and 2 */ - config->eventctrl1 |= val & (BIT(0) | BIT(1)); + config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1); break; case 0x2: /* generate Event element for event 1, 2 and 3 */ - config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); + config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | + TRCEVENTCTL1R_INSTEN_1 | + TRCEVENTCTL1R_INSTEN_2); break; case 0x3: /* generate Event element for all 4 events */ - config->eventctrl1 |= val & 0xF; + config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | + TRCEVENTCTL1R_INSTEN_1 | + TRCEVENTCTL1R_INSTEN_2 | + TRCEVENTCTL1R_INSTEN_3); break; default: break; @@ -702,10 +707,10 @@ static ssize_t bb_ctrl_store(struct device *dev, * individual range comparators. If include then at least 1 * range must be selected. */ - if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0)) + if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0)) return -EINVAL; - config->bb_ctrl = val & GENMASK(8, 0); + config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK); return size; } static DEVICE_ATTR_RW(bb_ctrl); @@ -718,7 +723,7 @@ static ssize_t event_vinst_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - val = config->vinst_ctrl & ETMv4_EVENT_MASK; + val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -734,9 +739,9 @@ static ssize_t event_vinst_store(struct device *dev, return -EINVAL; spin_lock(&drvdata->spinlock); - val &= ETMv4_EVENT_MASK; - config->vinst_ctrl &= ~ETMv4_EVENT_MASK; - config->vinst_ctrl |= val; + val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK); + config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK; + config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val); spin_unlock(&drvdata->spinlock); return size; } @@ -750,7 +755,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT; + val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -767,10 +772,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev, spin_lock(&drvdata->spinlock); /* clear all EXLEVEL_S bits */ - config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK); + config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK; /* enable instruction tracing for corresponding exception level */ 
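The idiom at work here, and throughout these sysfs hunks, is the replacement of open-coded shift/mask arithmetic (BMVAL() and hand-written *_SHIFT constants) with the <linux/bitfield.h> helpers, which derive the shift from the mask itself. A minimal sketch of the pattern; EXAMPLE_FIELD_MASK and both helper functions are invented for illustration, not part of the driver:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* EXAMPLE_FIELD_MASK stands in for any of the TRC*_MASK definitions */
	#define EXAMPLE_FIELD_MASK	GENMASK(19, 16)

	/* read a field: the shift is derived from the mask, no *_SHIFT define */
	static inline u32 example_get_field(u32 reg)
	{
		return FIELD_GET(EXAMPLE_FIELD_MASK, reg);
	}

	/* write a field: FIELD_PREP(m, v) is (v << __bf_shf(m)) masked to m */
	static inline u32 example_set_field(u32 reg, u32 val)
	{
		reg &= ~EXAMPLE_FIELD_MASK;
		reg |= FIELD_PREP(EXAMPLE_FIELD_MASK, val);
		return reg;
	}

This is why the header hunk further down can delete definitions such as TRCVICTLR_EXLEVEL_S_SHIFT outright: val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK) recovers the same shift from the mask alone.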
val &= drvdata->s_ex_level; - config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT); + config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK); spin_unlock(&drvdata->spinlock); return size; } @@ -785,7 +790,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev, struct etmv4_config *config = &drvdata->config; /* EXLEVEL_NS, bits[23:20] */ - val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT; + val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -802,10 +807,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev, spin_lock(&drvdata->spinlock); /* clear EXLEVEL_NS bits */ - config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK); + config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK; /* enable instruction tracing for corresponding exception level */ val &= drvdata->ns_ex_level; - config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT); + config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK); spin_unlock(&drvdata->spinlock); return size; } @@ -858,11 +863,11 @@ static ssize_t addr_instdatatype_show(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->addr_idx; - val = BMVAL(config->addr_acc[idx], 0, 1); + val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]); len = scnprintf(buf, PAGE_SIZE, "%s\n", - val == ETM_INSTR_ADDR ? "instr" : - (val == ETM_DATA_LOAD_ADDR ? "data_load" : - (val == ETM_DATA_STORE_ADDR ? "data_store" : + val == TRCACATRn_TYPE_ADDR ? "instr" : + (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" : + (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" : "data_load_store"))); spin_unlock(&drvdata->spinlock); return len; @@ -886,7 +891,7 @@ static ssize_t addr_instdatatype_store(struct device *dev, idx = config->addr_idx; if (!strcmp(str, "instr")) /* TYPE, bits[1:0] */ - config->addr_acc[idx] &= ~(BIT(0) | BIT(1)); + config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK; spin_unlock(&drvdata->spinlock); return size; @@ -1144,7 +1149,7 @@ static ssize_t addr_ctxtype_show(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* CONTEXTTYPE, bits[3:2] */ - val = BMVAL(config->addr_acc[idx], 2, 3); + val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]); len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : (val == ETM_CTX_CTXID ? "ctxid" : (val == ETM_CTX_VMID ? 
"vmid" : "all"))); @@ -1170,18 +1175,18 @@ static ssize_t addr_ctxtype_store(struct device *dev, idx = config->addr_idx; if (!strcmp(str, "none")) /* start by clearing context type bits */ - config->addr_acc[idx] &= ~(BIT(2) | BIT(3)); + config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK; else if (!strcmp(str, "ctxid")) { /* 0b01 The trace unit performs a Context ID */ if (drvdata->numcidc) { - config->addr_acc[idx] |= BIT(2); - config->addr_acc[idx] &= ~BIT(3); + config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID; + config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID; } } else if (!strcmp(str, "vmid")) { /* 0b10 The trace unit performs a VMID */ if (drvdata->numvmidc) { - config->addr_acc[idx] &= ~BIT(2); - config->addr_acc[idx] |= BIT(3); + config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID; + config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID; } } else if (!strcmp(str, "all")) { /* @@ -1189,9 +1194,9 @@ static ssize_t addr_ctxtype_store(struct device *dev, * comparison and a VMID */ if (drvdata->numcidc) - config->addr_acc[idx] |= BIT(2); + config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID; if (drvdata->numvmidc) - config->addr_acc[idx] |= BIT(3); + config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID; } spin_unlock(&drvdata->spinlock); return size; @@ -1210,7 +1215,7 @@ static ssize_t addr_context_show(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* context ID comparator bits[6:4] */ - val = BMVAL(config->addr_acc[idx], 4, 6); + val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]); spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1235,8 +1240,8 @@ static ssize_t addr_context_store(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* clear context ID comparator bits[6:4] */ - config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); - config->addr_acc[idx] |= (val << 4); + config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK; + config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK); spin_unlock(&drvdata->spinlock); return size; } @@ -1253,7 +1258,7 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->addr_idx; - val = BMVAL(config->addr_acc[idx], 8, 14); + val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]); spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1270,14 +1275,14 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev, if (kstrtoul(buf, 0, &val)) return -EINVAL; - if (val & ~((GENMASK(14, 8) >> 8))) + if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK))) return -EINVAL; spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */ - config->addr_acc[idx] &= ~(GENMASK(14, 8)); - config->addr_acc[idx] |= (val << 8); + config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK; + config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK); spin_unlock(&drvdata->spinlock); return size; } @@ -1721,8 +1726,11 @@ static ssize_t res_ctrl_store(struct device *dev, /* For odd idx pair inversal bit is RES0 */ if (idx % 2 != 0) /* PAIRINV, bit[21] */ - val &= ~BIT(21); - config->res_ctrl[idx] = val & GENMASK(21, 0); + val &= ~TRCRSCTLRn_PAIRINV; + config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV | + TRCRSCTLRn_INV | + TRCRSCTLRn_GROUP_MASK | + TRCRSCTLRn_SELECT_MASK); spin_unlock(&drvdata->spinlock); return size; } @@ -1787,9 +1795,9 @@ static ssize_t sshot_ctrl_store(struct device *dev, 
spin_lock(&drvdata->spinlock); idx = config->ss_idx; - config->ss_ctrl[idx] = val & GENMASK(24, 0); + config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val); /* must clear bit 31 in related status register on programming */ - config->ss_status[idx] &= ~BIT(31); + config->ss_status[idx] &= ~TRCSSCSRn_STATUS; spin_unlock(&drvdata->spinlock); return size; } @@ -1837,9 +1845,9 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->ss_idx; - config->ss_pe_cmp[idx] = val & GENMASK(7, 0); + config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val); /* must clear bit 31 in related status register on programming */ - config->ss_status[idx] &= ~BIT(31); + config->ss_status[idx] &= ~TRCSSCSRn_STATUS; spin_unlock(&drvdata->spinlock); return size; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 3c4d69b096ca..33869c1d20c3 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -131,6 +131,104 @@ #define TRCRSR_TA BIT(12) /* + * Bit positions of registers that are defined above, in the sysreg.h style + * of _MASK for multi bit fields and BIT() for single bits. + */ +#define TRCIDR0_INSTP0_MASK GENMASK(2, 1) +#define TRCIDR0_TRCBB BIT(5) +#define TRCIDR0_TRCCOND BIT(6) +#define TRCIDR0_TRCCCI BIT(7) +#define TRCIDR0_RETSTACK BIT(9) +#define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10) +#define TRCIDR0_QSUPP_MASK GENMASK(16, 15) +#define TRCIDR0_TSSIZE_MASK GENMASK(28, 24) + +#define TRCIDR2_CIDSIZE_MASK GENMASK(9, 5) +#define TRCIDR2_VMIDSIZE_MASK GENMASK(14, 10) +#define TRCIDR2_CCSIZE_MASK GENMASK(28, 25) + +#define TRCIDR3_CCITMIN_MASK GENMASK(11, 0) +#define TRCIDR3_EXLEVEL_S_MASK GENMASK(19, 16) +#define TRCIDR3_EXLEVEL_NS_MASK GENMASK(23, 20) +#define TRCIDR3_TRCERR BIT(24) +#define TRCIDR3_SYNCPR BIT(25) +#define TRCIDR3_STALLCTL BIT(26) +#define TRCIDR3_SYSSTALL BIT(27) +#define TRCIDR3_NUMPROC_LO_MASK GENMASK(30, 28) +#define TRCIDR3_NUMPROC_HI_MASK GENMASK(13, 12) +#define TRCIDR3_NOOVERFLOW BIT(31) + +#define TRCIDR4_NUMACPAIRS_MASK GENMASK(3, 0) +#define TRCIDR4_NUMPC_MASK GENMASK(15, 12) +#define TRCIDR4_NUMRSPAIR_MASK GENMASK(19, 16) +#define TRCIDR4_NUMSSCC_MASK GENMASK(23, 20) +#define TRCIDR4_NUMCIDC_MASK GENMASK(27, 24) +#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28) + +#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0) +#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16) +#define TRCIDR5_ATBTRIG BIT(22) +#define TRCIDR5_LPOVERRIDE BIT(23) +#define TRCIDR5_NUMSEQSTATE_MASK GENMASK(27, 25) +#define TRCIDR5_NUMCNTR_MASK GENMASK(30, 28) + +#define TRCCONFIGR_INSTP0_LOAD BIT(1) +#define TRCCONFIGR_INSTP0_STORE BIT(2) +#define TRCCONFIGR_INSTP0_LOAD_STORE (TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE) +#define TRCCONFIGR_BB BIT(3) +#define TRCCONFIGR_CCI BIT(4) +#define TRCCONFIGR_CID BIT(6) +#define TRCCONFIGR_VMID BIT(7) +#define TRCCONFIGR_COND_MASK GENMASK(10, 8) +#define TRCCONFIGR_TS BIT(11) +#define TRCCONFIGR_RS BIT(12) +#define TRCCONFIGR_QE_W_COUNTS BIT(13) +#define TRCCONFIGR_QE_WO_COUNTS BIT(14) +#define TRCCONFIGR_VMIDOPT BIT(15) +#define TRCCONFIGR_DA BIT(16) +#define TRCCONFIGR_DV BIT(17) + +#define TRCEVENTCTL1R_INSTEN_MASK GENMASK(3, 0) +#define TRCEVENTCTL1R_INSTEN_0 BIT(0) +#define TRCEVENTCTL1R_INSTEN_1 BIT(1) +#define TRCEVENTCTL1R_INSTEN_2 BIT(2) +#define TRCEVENTCTL1R_INSTEN_3 BIT(3) +#define TRCEVENTCTL1R_ATB BIT(11) +#define TRCEVENTCTL1R_LPOVERRIDE BIT(12) + +#define TRCSTALLCTLR_ISTALL BIT(8) 
+#define TRCSTALLCTLR_INSTPRIORITY BIT(10) +#define TRCSTALLCTLR_NOOVERFLOW BIT(13) + +#define TRCVICTLR_EVENT_MASK GENMASK(7, 0) +#define TRCVICTLR_SSSTATUS BIT(9) +#define TRCVICTLR_TRCRESET BIT(10) +#define TRCVICTLR_TRCERR BIT(11) +#define TRCVICTLR_EXLEVEL_MASK GENMASK(22, 16) +#define TRCVICTLR_EXLEVEL_S_MASK GENMASK(19, 16) +#define TRCVICTLR_EXLEVEL_NS_MASK GENMASK(22, 20) + +#define TRCACATRn_TYPE_MASK GENMASK(1, 0) +#define TRCACATRn_CONTEXTTYPE_MASK GENMASK(3, 2) +#define TRCACATRn_CONTEXTTYPE_CTXID BIT(2) +#define TRCACATRn_CONTEXTTYPE_VMID BIT(3) +#define TRCACATRn_CONTEXT_MASK GENMASK(6, 4) +#define TRCACATRn_EXLEVEL_MASK GENMASK(14, 8) + +#define TRCSSCSRn_STATUS BIT(31) +#define TRCSSCCRn_SAC_ARC_RST_MASK GENMASK(24, 0) + +#define TRCSSPCICRn_PC_MASK GENMASK(7, 0) + +#define TRCBBCTLR_MODE BIT(8) +#define TRCBBCTLR_RANGE_MASK GENMASK(7, 0) + +#define TRCRSCTLRn_PAIRINV BIT(21) +#define TRCRSCTLRn_INV BIT(20) +#define TRCRSCTLRn_GROUP_MASK GENMASK(19, 16) +#define TRCRSCTLRn_SELECT_MASK GENMASK(15, 0) + +/* * System instructions to access ETM registers. * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions */ @@ -630,23 +728,9 @@ #define ETM_EXLEVEL_NS_OS BIT(5) /* NonSecure EL1 */ #define ETM_EXLEVEL_NS_HYP BIT(6) /* NonSecure EL2 */ -#define ETM_EXLEVEL_MASK (GENMASK(6, 0)) -#define ETM_EXLEVEL_S_MASK (GENMASK(3, 0)) -#define ETM_EXLEVEL_NS_MASK (GENMASK(6, 4)) - /* access level controls in TRCACATRn */ #define TRCACATR_EXLEVEL_SHIFT 8 -/* access level control in TRCVICTLR */ -#define TRCVICTLR_EXLEVEL_SHIFT 16 -#define TRCVICTLR_EXLEVEL_S_SHIFT 16 -#define TRCVICTLR_EXLEVEL_NS_SHIFT 20 - -/* secure / non secure masks - TRCVICTLR, IDR3 */ -#define TRCVICTLR_EXLEVEL_MASK (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT) -#define TRCVICTLR_EXLEVEL_S_MASK (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT) -#define TRCVICTLR_EXLEVEL_NS_MASK (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT) - #define ETM_TRCIDR1_ARCH_MAJOR_SHIFT 8 #define ETM_TRCIDR1_ARCH_MAJOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT) #define ETM_TRCIDR1_ARCH_MAJOR(x) \ @@ -986,10 +1070,10 @@ struct etmv4_drvdata { /* Address comparator access types */ enum etm_addr_acctype { - ETM_INSTR_ADDR, - ETM_DATA_LOAD_ADDR, - ETM_DATA_STORE_ADDR, - ETM_DATA_LOAD_STORE_ADDR, + TRCACATRn_TYPE_ADDR, + TRCACATRn_TYPE_DATA_LOAD_ADDR, + TRCACATRn_TYPE_DATA_STORE_ADDR, + TRCACATRn_TYPE_DATA_LOAD_STORE_ADDR, }; /* Address comparator context types */ diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index eac3f02662ae..b53f010f3e40 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -290,7 +290,6 @@ config DA311 config DMARD06 tristate "Domintech DMARD06 Digital Accelerometer Driver" - depends on OF || COMPILE_TEST depends on I2C help Say yes here to build support for the Domintech low-g tri-axial diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c index e9c10c8c32f0..7561399daef3 100644 --- a/drivers/iio/accel/adxl355_core.c +++ b/drivers/iio/accel/adxl355_core.c @@ -18,7 +18,7 @@ #include <linux/math64.h> #include <linux/module.h> #include <linux/mod_devicetable.h> -#include <linux/of_irq.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/units.h> @@ -745,10 +745,7 @@ int adxl355_core_probe(struct device *dev, struct regmap *regmap, return ret; } - /* - * TODO: Would be good to move it to the generic version. 
- */ - irq = of_irq_get_byname(dev->of_node, "DRDY"); + irq = fwnode_irq_get_byname(dev_fwnode(dev), "DRDY"); if (irq > 0) { ret = adxl355_probe_trigger(indio_dev, irq); if (ret) diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 62960134ea19..0289ed8cf2c6 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -1567,7 +1567,6 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops, return ret; ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &adxl367_buffer_ops, adxl367_fifo_attributes); if (ret) diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 7516d7dde1af..57e8a8350cd1 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1525,7 +1525,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev) struct bmc150_accel_data *data = iio_priv(indio_dev); int ret = 0; - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) + if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED) return 0; mutex_lock(&data->mutex); @@ -1557,7 +1557,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev) { struct bmc150_accel_data *data = iio_priv(indio_dev); - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) + if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED) return 0; mutex_lock(&data->mutex); diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c index 53ab6078cb7f..cb0246ca72f3 100644 --- a/drivers/iio/accel/dmard09.c +++ b/drivers/iio/accel/dmard09.c @@ -24,7 +24,7 @@ #define DMARD09_AXIS_Y 1 #define DMARD09_AXIS_Z 2 #define DMARD09_AXIS_X_OFFSET ((DMARD09_AXIS_X + 1) * 2) -#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1 )* 2) +#define DMARD09_AXIS_Y_OFFSET ((DMARD09_AXIS_Y + 1) * 2) #define DMARD09_AXIS_Z_OFFSET ((DMARD09_AXIS_Z + 1) * 2) struct dmard09_data { diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index a9d2f10d5d45..8874d6d61725 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -1217,7 +1217,6 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq) return ret; ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &fxls8962af_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c index ec17e35e573e..b7b5af45429e 100644 --- a/drivers/iio/accel/kxsd9-spi.c +++ b/drivers/iio/accel/kxsd9-spi.c @@ -44,8 +44,8 @@ static const struct spi_device_id kxsd9_spi_id[] = { MODULE_DEVICE_TABLE(spi, kxsd9_spi_id); static const struct of_device_id kxsd9_of_match[] = { - { .compatible = "kionix,kxsd9" }, - { }, + { .compatible = "kionix,kxsd9" }, + { } }; MODULE_DEVICE_TABLE(of, kxsd9_of_match); diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 9c02c681c84c..912a447e6310 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -166,6 +166,7 @@ static const struct mma8452_event_regs trans_ev_regs = { /** * struct mma_chip_info - chip specific data + * @name: part number of device reported via 'name' attr * @chip_id: WHO_AM_I register's value * @channels: struct iio_chan_spec matching the device's * capabilities diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 83c81072511e..29a68a7d34cd 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -1474,7 +1474,6 @@ static 
int sca3000_probe(struct spi_device *spi) indio_dev->modes = INDIO_DIRECT_MODE; ret = devm_iio_kfifo_buffer_setup(&spi->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &sca3000_ring_setup_ops); if (ret) return ret; diff --git a/drivers/iio/accel/ssp_accel_sensor.c b/drivers/iio/accel/ssp_accel_sensor.c index a1164b439f41..7ca9d0d543e0 100644 --- a/drivers/iio/accel/ssp_accel_sensor.c +++ b/drivers/iio/accel/ssp_accel_sensor.c @@ -113,7 +113,6 @@ static int ssp_accel_probe(struct platform_device *pdev) indio_dev->available_scan_masks = ssp_accel_scan_mask; ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &ssp_accel_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h index 00e056c21bfc..5b0f54e33d9e 100644 --- a/drivers/iio/accel/st_accel.h +++ b/drivers/iio/accel/st_accel.h @@ -14,32 +14,6 @@ #include <linux/types.h> #include <linux/iio/common/st_sensors.h> -enum st_accel_type { - LSM303DLH, - LSM303DLHC, - LIS3DH, - LSM330D, - LSM330DL, - LSM330DLC, - LIS331DLH, - LSM303DL, - LSM303DLM, - LSM330, - LSM303AGR, - LIS2DH12, - LIS3L02DQ, - LNG2DM, - H3LIS331DL, - LIS331DL, - LIS3LV02DL, - LIS2DW12, - LIS3DHH, - LIS2DE12, - LIS2HH12, - SC7A20, - ST_ACCEL_MAX, -}; - #define H3LIS331DL_ACCEL_DEV_NAME "h3lis331dl_accel" #define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel" #define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel" @@ -62,8 +36,10 @@ enum st_accel_type { #define LIS3DE_ACCEL_DEV_NAME "lis3de" #define LIS2DE12_ACCEL_DEV_NAME "lis2de12" #define LIS2HH12_ACCEL_DEV_NAME "lis2hh12" +#define LIS302DL_ACCEL_DEV_NAME "lis302dl" #define SC7A20_ACCEL_DEV_NAME "sc7a20" + #ifdef CONFIG_IIO_BUFFER int st_accel_allocate_ring(struct iio_dev *indio_dev); int st_accel_trig_set_state(struct iio_trigger *trig, bool state); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 5c5da6fdb490..c8c8eb15c34e 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -444,6 +444,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, .sensors_supported = { [0] = LIS331DL_ACCEL_DEV_NAME, + [1] = LIS302DL_ACCEL_DEV_NAME, }, .ch = (struct iio_chan_spec *)st_accel_8bit_channels, .odr = { @@ -1209,28 +1210,21 @@ read_error: static int st_accel_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { - int err; - switch (mask) { case IIO_CHAN_INFO_SCALE: { int gain; gain = val * 1000000 + val2; - err = st_sensors_set_fullscale_by_gain(indio_dev, gain); - break; + return st_sensors_set_fullscale_by_gain(indio_dev, gain); } case IIO_CHAN_INFO_SAMP_FREQ: if (val2) return -EINVAL; - mutex_lock(&indio_dev->mlock); - err = st_sensors_set_odr(indio_dev, val); - mutex_unlock(&indio_dev->mlock); - return err; + + return st_sensors_set_odr(indio_dev, val); default: return -EINVAL; } - - return err; } static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL(); diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c index 96adc4344f4a..45ee0ddc133c 100644 --- a/drivers/iio/accel/st_accel_i2c.c +++ b/drivers/iio/accel/st_accel_i2c.c @@ -108,6 +108,10 @@ static const struct of_device_id st_accel_of_match[] = { .data = LIS2HH12_ACCEL_DEV_NAME, }, { + .compatible = "st,lis302dl", + .data = LIS302DL_ACCEL_DEV_NAME, + }, + { .compatible = "silan,sc7a20", .data = SC7A20_ACCEL_DEV_NAME, }, @@ -146,6 +150,7 @@ static const struct i2c_device_id st_accel_id_table[] = { { 
LIS3DE_ACCEL_DEV_NAME }, { LIS2DE12_ACCEL_DEV_NAME }, { LIS2HH12_ACCEL_DEV_NAME }, + { LIS302DL_ACCEL_DEV_NAME }, { SC7A20_ACCEL_DEV_NAME }, {}, }; diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c index 108b63d0146c..6c0917750288 100644 --- a/drivers/iio/accel/st_accel_spi.c +++ b/drivers/iio/accel/st_accel_spi.c @@ -92,6 +92,10 @@ static const struct of_device_id st_accel_of_match[] = { .compatible = "st,lis3de", .data = LIS3DE_ACCEL_DEV_NAME, }, + { + .compatible = "st,lis302dl", + .data = LIS302DL_ACCEL_DEV_NAME, + }, {} }; MODULE_DEVICE_TABLE(of, st_accel_of_match); @@ -147,6 +151,7 @@ static const struct spi_device_id st_accel_id_table[] = { { LIS2DW12_ACCEL_DEV_NAME }, { LIS3DHH_ACCEL_DEV_NAME }, { LIS3DE_ACCEL_DEV_NAME }, + { LIS302DL_ACCEL_DEV_NAME }, {}, }; MODULE_DEVICE_TABLE(spi, st_accel_id_table); diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 71ab0a06aa82..48ace7412874 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -910,7 +910,7 @@ config ROCKCHIP_SARADC config RZG2L_ADC tristate "Renesas RZ/G2L ADC driver" - depends on ARCH_R9A07G044 || COMPILE_TEST + depends on ARCH_RZG2L || COMPILE_TEST help Say yes here to build support for the ADC found in Renesas RZ/G2L family. diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index c47ead15f6e5..c5b785d8b241 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -43,6 +43,8 @@ #define AD7124_STATUS_POR_FLAG_MSK BIT(4) /* AD7124_ADC_CONTROL */ +#define AD7124_ADC_STATUS_EN_MSK BIT(10) +#define AD7124_ADC_STATUS_EN(x) FIELD_PREP(AD7124_ADC_STATUS_EN_MSK, x) #define AD7124_ADC_CTRL_REF_EN_MSK BIT(8) #define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x) #define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6) @@ -188,7 +190,6 @@ static const struct iio_chan_spec ad7124_channel_template = { .sign = 'u', .realbits = 24, .storagebits = 32, - .shift = 8, .endianness = IIO_BE, }, }; @@ -501,26 +502,70 @@ static int ad7124_prepare_read(struct ad7124_state *st, int address) return ad7124_enable_channel(st, &st->channels[address]); } +static int __ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel) +{ + struct ad7124_state *st = container_of(sd, struct ad7124_state, sd); + + return ad7124_prepare_read(st, channel); +} + static int ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel) { struct ad7124_state *st = container_of(sd, struct ad7124_state, sd); int ret; mutex_lock(&st->cfgs_lock); - ret = ad7124_prepare_read(st, channel); + ret = __ad7124_set_channel(sd, channel); mutex_unlock(&st->cfgs_lock); return ret; } +static int ad7124_append_status(struct ad_sigma_delta *sd, bool append) +{ + struct ad7124_state *st = container_of(sd, struct ad7124_state, sd); + unsigned int adc_control = st->adc_control; + int ret; + + adc_control &= ~AD7124_ADC_STATUS_EN_MSK; + adc_control |= AD7124_ADC_STATUS_EN(append); + + ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, adc_control); + if (ret < 0) + return ret; + + st->adc_control = adc_control; + + return 0; +} + +static int ad7124_disable_all(struct ad_sigma_delta *sd) +{ + struct ad7124_state *st = container_of(sd, struct ad7124_state, sd); + int ret; + int i; + + for (i = 0; i < st->num_channels; i++) { + ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2); + if (ret < 0) + return ret; + } + + return 0; +} + static const struct ad_sigma_delta_info ad7124_sigma_delta_info = { .set_channel = ad7124_set_channel, + 
.append_status = ad7124_append_status, + .disable_all = ad7124_disable_all, .set_mode = ad7124_set_mode, .has_registers = true, .addr_shift = 0, .read_mask = BIT(6), + .status_ch_mask = GENMASK(3, 0), .data_reg = AD7124_DATA, - .irq_flags = IRQF_TRIGGER_FALLING + .num_slots = 8, + .irq_flags = IRQF_TRIGGER_FALLING, }; static int ad7124_read_raw(struct iio_dev *indio_dev, @@ -670,11 +715,40 @@ static const struct attribute_group ad7124_attrs_group = { .attrs = ad7124_attributes, }; +static int ad7124_update_scan_mode(struct iio_dev *indio_dev, + const unsigned long *scan_mask) +{ + struct ad7124_state *st = iio_priv(indio_dev); + bool bit_set; + int ret; + int i; + + mutex_lock(&st->cfgs_lock); + for (i = 0; i < st->num_channels; i++) { + bit_set = test_bit(i, scan_mask); + if (bit_set) + ret = __ad7124_set_channel(&st->sd, i); + else + ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, + 0, 2); + if (ret < 0) { + mutex_unlock(&st->cfgs_lock); + + return ret; + } + } + + mutex_unlock(&st->cfgs_lock); + + return 0; +} + static const struct iio_info ad7124_info = { .read_raw = ad7124_read_raw, .write_raw = ad7124_write_raw, .debugfs_reg_access = &ad7124_reg_access, .validate_trigger = ad_sd_validate_trigger, + .update_scan_mode = ad7124_update_scan_mode, .attrs = &ad7124_attrs_group, }; @@ -886,12 +960,14 @@ static int ad7124_probe(struct spi_device *spi) st->chip_info = info; - ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info); - indio_dev->name = st->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &ad7124_info; + ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7124_sigma_delta_info); + if (ret < 0) + return ret; + ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node); if (ret < 0) return ret; diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 770b4e59238f..d71977be7d22 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -58,7 +58,8 @@ /* Mode Register Bit Designations (AD7192_REG_MODE) */ #define AD7192_MODE_SEL(x) (((x) & 0x7) << 21) /* Operation Mode Select */ #define AD7192_MODE_SEL_MASK (0x7 << 21) /* Operation Mode Select Mask */ -#define AD7192_MODE_DAT_STA BIT(20) /* Status Register transmission */ +#define AD7192_MODE_STA(x) (((x) & 0x1) << 20) /* Status Register transmission */ +#define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */ #define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */ #define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */ #define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/ @@ -225,7 +226,7 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev, bool sys_calib; int ret, temp; - ret = strtobool(buf, &sys_calib); + ret = kstrtobool(buf, &sys_calib); if (ret) return ret; @@ -288,12 +289,51 @@ static int ad7192_set_mode(struct ad_sigma_delta *sd, return ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode); } +static int ad7192_append_status(struct ad_sigma_delta *sd, bool append) +{ + struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd); + unsigned int mode = st->mode; + int ret; + + mode &= ~AD7192_MODE_STA_MASK; + mode |= AD7192_MODE_STA(append); + + ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, mode); + if (ret < 0) + return ret; + + st->mode = mode; + + return 0; +} + +static int ad7192_disable_all(struct ad_sigma_delta *sd) +{ + struct ad7192_state *st = ad_sigma_delta_to_ad7192(sd); + u32 conf = st->conf; + int ret; + + conf &= ~AD7192_CONF_CHAN_MASK; + + ret = 
ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf); + if (ret < 0) + return ret; + + st->conf = conf; + + return 0; +} + static const struct ad_sigma_delta_info ad7192_sigma_delta_info = { .set_channel = ad7192_set_channel, + .append_status = ad7192_append_status, + .disable_all = ad7192_disable_all, .set_mode = ad7192_set_mode, .has_registers = true, .addr_shift = 3, .read_mask = BIT(6), + .status_ch_mask = GENMASK(3, 0), + .num_slots = 4, .irq_flags = IRQF_TRIGGER_FALLING, }; @@ -457,7 +497,7 @@ static ssize_t ad7192_set(struct device *dev, int ret; bool val; - ret = strtobool(buf, &val); + ret = kstrtobool(buf, &val); if (ret < 0) return ret; @@ -783,6 +823,26 @@ static int ad7192_read_avail(struct iio_dev *indio_dev, return -EINVAL; } +static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) +{ + struct ad7192_state *st = iio_priv(indio_dev); + u32 conf = st->conf; + int ret; + int i; + + conf &= ~AD7192_CONF_CHAN_MASK; + for_each_set_bit(i, scan_mask, 8) + conf |= AD7192_CONF_CHAN(i); + + ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf); + if (ret < 0) + return ret; + + st->conf = conf; + + return 0; +} + static const struct iio_info ad7192_info = { .read_raw = ad7192_read_raw, .write_raw = ad7192_write_raw, @@ -790,6 +850,7 @@ static const struct iio_info ad7192_info = { .read_avail = ad7192_read_avail, .attrs = &ad7192_attribute_group, .validate_trigger = ad_sd_validate_trigger, + .update_scan_mode = ad7192_update_scan_mode, }; static const struct iio_info ad7195_info = { @@ -799,6 +860,7 @@ static const struct iio_info ad7195_info = { .read_avail = ad7192_read_avail, .attrs = &ad7195_attribute_group, .validate_trigger = ad_sd_validate_trigger, + .update_scan_mode = ad7192_update_scan_mode, }; #define __AD719x_CHANNEL(_si, _channel1, _channel2, _address, _extend_name, \ diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index c17d9b5fbaf6..f20d39f0bc01 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -378,6 +378,11 @@ static const char * const ad7266_gpio_labels[] = { "ad0", "ad1", "ad2", }; +static void ad7266_reg_disable(void *reg) +{ + regulator_disable(reg); +} + static int ad7266_probe(struct spi_device *spi) { struct ad7266_platform_data *pdata = spi->dev.platform_data; @@ -398,9 +403,13 @@ static int ad7266_probe(struct spi_device *spi) if (ret) return ret; + ret = devm_add_action_or_reset(&spi->dev, ad7266_reg_disable, st->reg); + if (ret) + return ret; + ret = regulator_get_voltage(st->reg); if (ret < 0) - goto error_disable_reg; + return ret; st->vref_mv = ret / 1000; } else { @@ -423,7 +432,7 @@ static int ad7266_probe(struct spi_device *spi) GPIOD_OUT_LOW); if (IS_ERR(st->gpios[i])) { ret = PTR_ERR(st->gpios[i]); - goto error_disable_reg; + return ret; } } } @@ -433,7 +442,6 @@ static int ad7266_probe(struct spi_device *spi) st->mode = AD7266_MODE_DIFF; } - spi_set_drvdata(spi, indio_dev); st->spi = spi; indio_dev->name = spi_get_device_id(spi)->name; @@ -459,35 +467,12 @@ static int ad7266_probe(struct spi_device *spi) spi_message_add_tail(&st->single_xfer[1], &st->single_msg); spi_message_add_tail(&st->single_xfer[2], &st->single_msg); - ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, &iio_pollfunc_store_time, &ad7266_trigger_handler, &iio_triggered_buffer_setup_ops); if (ret) - goto error_disable_reg; - - ret = iio_device_register(indio_dev); - if (ret) - goto error_buffer_cleanup; - - return 0; - 
-error_buffer_cleanup: - iio_triggered_buffer_cleanup(indio_dev); -error_disable_reg: - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); - - return ret; -} - -static void ad7266_remove(struct spi_device *spi) -{ - struct iio_dev *indio_dev = spi_get_drvdata(spi); - struct ad7266_state *st = iio_priv(indio_dev); + return ret; - iio_device_unregister(indio_dev); - iio_triggered_buffer_cleanup(indio_dev); - if (!IS_ERR(st->reg)) - regulator_disable(st->reg); + return devm_iio_device_register(&spi->dev, indio_dev); } static const struct spi_device_id ad7266_id[] = { @@ -502,7 +487,6 @@ static struct spi_driver ad7266_driver = { .name = "ad7266", }, .probe = ad7266_probe, - .remove = ad7266_remove, .id_table = ad7266_id, }; module_spi_driver(ad7266_driver); diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c index ec9acbf12b9a..3bdf3d9422f2 100644 --- a/drivers/iio/adc/ad7280a.c +++ b/drivers/iio/adc/ad7280a.c @@ -488,7 +488,7 @@ static ssize_t ad7280_store_balance_sw(struct iio_dev *indio_dev, bool readin; int ret; - ret = strtobool(buf, &readin); + ret = kstrtobool(buf, &readin); if (ret) return ret; diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index ebcd52526cac..261a9a6b45e1 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -6,6 +6,7 @@ * Author: Lars-Peter Clausen <lars@metafoo.de> */ +#include <linux/align.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> @@ -342,15 +343,49 @@ EXPORT_SYMBOL_NS_GPL(ad_sigma_delta_single_conversion, IIO_AD_SIGMA_DELTA); static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) { struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev); + unsigned int i, slot, samples_buf_size; unsigned int channel; + uint8_t *samples_buf; int ret; - channel = find_first_bit(indio_dev->active_scan_mask, - indio_dev->masklength); - ret = ad_sigma_delta_set_channel(sigma_delta, - indio_dev->channels[channel].address); - if (ret) - return ret; + if (sigma_delta->num_slots == 1) { + channel = find_first_bit(indio_dev->active_scan_mask, + indio_dev->masklength); + ret = ad_sigma_delta_set_channel(sigma_delta, + indio_dev->channels[channel].address); + if (ret) + return ret; + slot = 1; + } else { + /* + * At this point update_scan_mode already enabled the required channels. + * For sigma-delta sequencer drivers with multiple slots, an update_scan_mode + * implementation is mandatory. 
+	 */
+	slot = 0;
+	for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) {
+		sigma_delta->slots[slot] = indio_dev->channels[i].address;
+		slot++;
+	}
+	}
+
+	sigma_delta->active_slots = slot;
+	sigma_delta->current_slot = 0;
+
+	if (sigma_delta->active_slots > 1) {
+		ret = ad_sigma_delta_append_status(sigma_delta, true);
+		if (ret)
+			return ret;
+	}
+
+	samples_buf_size = ALIGN(slot * indio_dev->channels[0].scan_type.storagebits, 8);
+	samples_buf_size += sizeof(int64_t);
+	samples_buf = devm_krealloc(&sigma_delta->spi->dev, sigma_delta->samples_buf,
+				    samples_buf_size, GFP_KERNEL);
+	if (!samples_buf)
+		return -ENOMEM;
+
+	sigma_delta->samples_buf = samples_buf;

 	spi_bus_lock(sigma_delta->spi->master);
 	sigma_delta->bus_locked = true;
@@ -386,6 +421,10 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
 	sigma_delta->keep_cs_asserted = false;
 	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);

+	if (sigma_delta->status_appended)
+		ad_sigma_delta_append_status(sigma_delta, false);
+
+	ad_sigma_delta_disable_all(sigma_delta);
 	sigma_delta->bus_locked = false;
 	return spi_bus_unlock(sigma_delta->spi->master);
 }
@@ -396,6 +435,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
 	struct iio_dev *indio_dev = pf->indio_dev;
 	struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
 	uint8_t *data = sigma_delta->rx_buf;
+	unsigned int transfer_size;
+	unsigned int sample_size;
+	unsigned int sample_pos;
+	unsigned int status_pos;
 	unsigned int reg_size;
 	unsigned int data_reg;
@@ -408,21 +451,69 @@
 	else
 		data_reg = AD_SD_REG_DATA;

+	/* Status word will be appended to the sample during transfer */
+	if (sigma_delta->status_appended)
+		transfer_size = reg_size + 1;
+	else
+		transfer_size = reg_size;
+
 	switch (reg_size) {
 	case 4:
 	case 2:
 	case 1:
-		ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[0]);
+		status_pos = reg_size;
+		ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[0]);
 		break;
 	case 3:
+		/*
+		 * Data array after transfer will look like (if status is appended):
+		 * data[] = { [0][sample][sample][sample][status] }
+		 * Keeping the first byte 0 shifts the status position by 1 byte to the right.
+		 */
+		status_pos = reg_size + 1;
+
 		/* We store 24 bit samples in a 32 bit word. Keep the upper
 		 * byte set to zero. */
-		ad_sd_read_reg_raw(sigma_delta, data_reg, reg_size, &data[1]);
+		ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
 		break;
 	}

-	iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+	/*
+	 * For devices sampling only one channel at
+	 * a time, there is no need for sample number tracking.
+	 */
+	if (sigma_delta->active_slots == 1) {
+		iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+		goto irq_handled;
+	}
+
+	if (sigma_delta->status_appended) {
+		u8 converted_channel;
+
+		converted_channel = data[status_pos] & sigma_delta->info->status_ch_mask;
+		if (converted_channel != sigma_delta->slots[sigma_delta->current_slot]) {
+			/*
+			 * Desync occurred during continuous sampling of multiple channels.
+			 * Drop this incomplete sample and start from first channel again.
+			 */
+
+			sigma_delta->current_slot = 0;
+			goto irq_handled;
+		}
+	}
+
+	sample_size = indio_dev->channels[0].scan_type.storagebits / 8;
+	sample_pos = sample_size * sigma_delta->current_slot;
+	memcpy(&sigma_delta->samples_buf[sample_pos], data, sample_size);
+	sigma_delta->current_slot++;
+	if (sigma_delta->current_slot == sigma_delta->active_slots) {
+		sigma_delta->current_slot = 0;
+		iio_push_to_buffers_with_timestamp(indio_dev, sigma_delta->samples_buf,
+						   pf->timestamp);
+	}
+
+irq_handled:
 	iio_trigger_notify_done(indio_dev->trig);
 	sigma_delta->irq_dis = false;
 	enable_irq(sigma_delta->spi->irq);
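The resynchronization rule in this handler is the heart of the new sequencer support: each conversion carries a status byte naming the channel, samples accumulate into per-slot positions, and a mismatch discards the partial scan. The standalone model below is a sketch only; seq_feed(), STATUS_CH_MASK and the fixed-size arrays are invented stand-ins for the driver's ad_sigma_delta state, not driver API.

	#include <stdint.h>
	#include <stdio.h>

	#define STATUS_CH_MASK 0x0f	/* low status bits identify the converted channel */

	struct seq_state {
		uint8_t slots[8];	/* channel expected in each sequencer slot */
		unsigned int active_slots;
		unsigned int current_slot;
		uint32_t frame[8];	/* one assembled scan, one sample per slot */
	};

	/* Returns 1 when a complete frame is ready to be pushed, 0 otherwise. */
	static int seq_feed(struct seq_state *s, uint32_t sample, uint8_t status)
	{
		if ((status & STATUS_CH_MASK) != s->slots[s->current_slot]) {
			/* desync: drop the partial frame, restart from the first slot */
			s->current_slot = 0;
			return 0;
		}

		s->frame[s->current_slot++] = sample;
		if (s->current_slot == s->active_slots) {
			s->current_slot = 0;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct seq_state s = { .slots = { 0, 1 }, .active_slots = 2 };

		/* two in-order conversions assemble one frame */
		seq_feed(&s, 0x111111, 0x00);
		if (seq_feed(&s, 0x222222, 0x01))
			printf("frame: %06x %06x\n", (unsigned)s.frame[0], (unsigned)s.frame[1]);

		/* an out-of-order status byte drops the partial frame instead */
		seq_feed(&s, 0x333333, 0x01);
		return 0;
	}

Note that a mismatching sample is simply discarded rather than retried against slot 0, so after a glitch the core waits for the next conversion of the first channel.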
@@ -430,10 +521,17 @@
 	return IRQ_HANDLED;
 }

+static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned long *mask)
+{
+	struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+	return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots;
+}
+
 static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
 	.postenable = &ad_sd_buffer_postenable,
 	.postdisable = &ad_sd_buffer_postdisable,
-	.validate_scan_mask = &iio_validate_scan_mask_onehot,
+	.validate_scan_mask = &ad_sd_validate_scan_mask,
 };

 static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
@@ -513,8 +611,14 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
  */
 int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev)
 {
+	struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
 	int ret;

+	sigma_delta->slots = devm_kcalloc(dev, sigma_delta->num_slots,
+					  sizeof(*sigma_delta->slots), GFP_KERNEL);
+	if (!sigma_delta->slots)
+		return -ENOMEM;
+
 	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
 					      &iio_pollfunc_store_time,
 					      &ad_sd_trigger_handler,
@@ -541,6 +645,25 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
 {
 	sigma_delta->spi = spi;
 	sigma_delta->info = info;
+
+	/* If the field is unset in ad_sigma_delta_info, assume there can only be 1 slot. */
+	if (!info->num_slots)
+		sigma_delta->num_slots = 1;
+	else
+		sigma_delta->num_slots = info->num_slots;
+
+	if (sigma_delta->num_slots > 1) {
+		if (!indio_dev->info->update_scan_mode) {
+			dev_err(&spi->dev, "iio_dev lacks update_scan_mode().\n");
+			return -EINVAL;
+		}
+
+		if (!info->disable_all) {
+			dev_err(&spi->dev, "ad_sigma_delta_info lacks disable_all().\n");
+			return -EINVAL;
+		}
+	}
+
 	iio_device_set_drvdata(indio_dev, sigma_delta);

 	return 0;
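Before the diff moves on to the at91 driver, it is worth spelling out the contract a sigma-delta driver accepts when it advertises more than one slot, as the ad7124 and ad7192 conversions earlier in this series do. The skeleton below is a sketch assembled from the fields visible in this patch; the demo_* callbacks are empty stand-ins, not a real device driver.

	#include <linux/bits.h>
	#include <linux/interrupt.h>
	#include <linux/types.h>
	#include <linux/iio/adc/ad_sigma_delta.h>

	static int demo_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
	{
		/* program the channel configuration registers here */
		return 0;
	}

	static int demo_append_status(struct ad_sigma_delta *sd, bool append)
	{
		/* set or clear the device's "send status with every sample" bit */
		return 0;
	}

	static int demo_disable_all(struct ad_sigma_delta *sd)
	{
		/* disable every channel slot in hardware */
		return 0;
	}

	static int demo_set_mode(struct ad_sigma_delta *sd, enum ad_sigma_delta_mode mode)
	{
		return 0;
	}

	static const struct ad_sigma_delta_info demo_sigma_delta_info = {
		.set_channel	= demo_set_channel,
		.append_status	= demo_append_status,	/* required once num_slots > 1 */
		.disable_all	= demo_disable_all,	/* ad_sd_init() rejects its absence */
		.set_mode	= demo_set_mode,
		.has_registers	= true,
		.status_ch_mask	= GENMASK(3, 0),	/* status bits naming the channel */
		.num_slots	= 4,			/* sequencer depth offered to the core */
		.irq_flags	= IRQF_TRIGGER_FALLING,
	};

ad_sd_init() additionally requires an update_scan_mode() implementation in the driver's iio_info when num_slots is greater than 1, since that callback is where the channels selected in the scan mask are enabled before the buffer starts.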
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 854b1f81d807..b764823ce57e 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1117,7 +1117,7 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
 		return at91_adc_configure_touch(st, true);

 	/* if we are not in triggered mode, we cannot enable the buffer. */
-	if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+	if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
 		return -EINVAL;

 	/* we continue with the triggered buffer */
@@ -1159,7 +1159,7 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
 		return at91_adc_configure_touch(st, false);

 	/* if we are not in triggered mode, nothing to do here */
-	if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
+	if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
 		return -EINVAL;

 	/*
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 8d902a32a0fd..abad16803849 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
 	bool val;
 	int ret;

-	ret = strtobool(buf, &val);
+	ret = kstrtobool(buf, &val);
 	if (ret)
 		return ret;
@@ -1027,7 +1027,6 @@ static int ina2xx_probe(struct i2c_client *client,
 	indio_dev->name = id->name;

 	ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-					  INDIO_BUFFER_SOFTWARE,
 					  &ina2xx_setup_ops);
 	if (ret)
 		return ret;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 61e80bf3d05e..fd000345ec5c 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -376,7 +376,8 @@ static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
 			adc->adc_info[adc_chan].gain_error;

 	if (val < 0) {
-		dev_err(adc->dev, "Mismatch with calibration\n");
+		if (val < -10)
+			dev_err(adc->dev, "Mismatch with calibration var = %d\n", val);
 		return 0;
 	}
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index 00098caf6d9e..e9ff2d6a8a57 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -9,12 +9,16 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>

 /* PMIC global registers definition */
-#define SC27XX_MODULE_EN		0xc08
+#define SC2730_MODULE_EN		0x1808
+#define SC2731_MODULE_EN		0xc08
 #define SC27XX_MODULE_ADC_EN		BIT(5)
-#define SC27XX_ARM_CLK_EN		0xc10
+#define SC2721_ARM_CLK_EN		0xc0c
+#define SC2730_ARM_CLK_EN		0x180c
+#define SC2731_ARM_CLK_EN		0xc10
 #define SC27XX_CLK_ADC_EN		BIT(5)
 #define SC27XX_CLK_ADC_CLK_EN		BIT(6)
@@ -36,8 +40,10 @@
 /* Bits and mask definition for SC27XX_ADC_CH_CFG register */
 #define SC27XX_ADC_CHN_ID_MASK		GENMASK(4, 0)
-#define SC27XX_ADC_SCALE_MASK		GENMASK(10, 8)
-#define SC27XX_ADC_SCALE_SHIFT		8
+#define SC27XX_ADC_SCALE_MASK		GENMASK(10, 9)
+#define SC2721_ADC_SCALE_MASK		BIT(5)
+#define SC27XX_ADC_SCALE_SHIFT		9
+#define SC2721_ADC_SCALE_SHIFT		5

 /* Bits definitions for SC27XX_ADC_INT_EN registers */
 #define SC27XX_ADC_IRQ_EN		BIT(0)
@@ -67,8 +73,15 @@
 #define SC27XX_RATIO_NUMERATOR_OFFSET	16
 #define SC27XX_RATIO_DENOMINATOR_MASK	GENMASK(15, 0)

+/* ADC specific channel reference voltage 3.5V */
+#define SC27XX_ADC_REFVOL_VDD35		3500000
+
+/* ADC default channel reference voltage is 2.8V */
+#define SC27XX_ADC_REFVOL_VDD28		2800000
+
 struct sc27xx_adc_data {
 	struct device *dev;
+	struct regulator *volref;
 	struct regmap *regmap;
 	/*
 	 * One hardware spinlock to synchronize between the multiple
@@ -78,6 +91,24 @@ struct sc27xx_adc_data {
 	int channel_scale[SC27XX_ADC_CHANNEL_MAX];
 	u32 base;
 	int irq;
+	const struct sc27xx_adc_variant_data *var_data;
+};
+
+/*
+ * Since different PMICs of the SC27xx series can have different
+ * addresses and ratios, we should save the ratio config and base
+ *
in the device data structure. + */ +struct sc27xx_adc_variant_data { + u32 module_en; + u32 clk_en; + u32 scale_shift; + u32 scale_mask; + const struct sc27xx_adc_linear_graph *bscale_cal; + const struct sc27xx_adc_linear_graph *sscale_cal; + void (*init_scale)(struct sc27xx_adc_data *data); + int (*get_ratio)(int channel, int scale); + bool set_volref; }; struct sc27xx_adc_linear_graph { @@ -103,6 +134,16 @@ static struct sc27xx_adc_linear_graph small_scale_graph = { 100, 341, }; +static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = { + 4200, 850, + 3600, 728, +}; + +static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = { + 1000, 838, + 100, 84, +}; + static const struct sc27xx_adc_linear_graph big_scale_graph_calib = { 4200, 856, 3600, 733, @@ -118,49 +159,225 @@ static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc) return ((calib_data & 0xff) + calib_adc - 128) * 4; } +/* get the adc nvmem cell calibration data */ +static int adc_nvmem_cell_calib_data(struct sc27xx_adc_data *data, const char *cell_name) +{ + struct nvmem_cell *cell; + void *buf; + u32 origin_calib_data = 0; + size_t len; + + if (!data) + return -EINVAL; + + cell = nvmem_cell_get(data->dev, cell_name); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) { + nvmem_cell_put(cell); + return PTR_ERR(buf); + } + + memcpy(&origin_calib_data, buf, min(len, sizeof(u32))); + + kfree(buf); + nvmem_cell_put(cell); + return origin_calib_data; +} + static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data, bool big_scale) { const struct sc27xx_adc_linear_graph *calib_graph; struct sc27xx_adc_linear_graph *graph; - struct nvmem_cell *cell; const char *cell_name; u32 calib_data = 0; - void *buf; - size_t len; if (big_scale) { - calib_graph = &big_scale_graph_calib; + calib_graph = data->var_data->bscale_cal; graph = &big_scale_graph; cell_name = "big_scale_calib"; } else { - calib_graph = &small_scale_graph_calib; + calib_graph = data->var_data->sscale_cal; graph = &small_scale_graph; cell_name = "small_scale_calib"; } - cell = nvmem_cell_get(data->dev, cell_name); - if (IS_ERR(cell)) - return PTR_ERR(cell); - - buf = nvmem_cell_read(cell, &len); - nvmem_cell_put(cell); - - if (IS_ERR(buf)) - return PTR_ERR(buf); - - memcpy(&calib_data, buf, min(len, sizeof(u32))); + calib_data = adc_nvmem_cell_calib_data(data, cell_name); /* Only need to calibrate the adc values in the linear graph. 
*/ graph->adc0 = sc27xx_adc_get_calib_data(calib_data, calib_graph->adc0); graph->adc1 = sc27xx_adc_get_calib_data(calib_data >> 8, calib_graph->adc1); - kfree(buf); return 0; } -static int sc27xx_adc_get_ratio(int channel, int scale) +static int sc2720_adc_get_ratio(int channel, int scale) +{ + switch (channel) { + case 14: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(68, 900); + case 1: + return SC27XX_VOLT_RATIO(68, 1760); + case 2: + return SC27XX_VOLT_RATIO(68, 2327); + case 3: + return SC27XX_VOLT_RATIO(68, 3654); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + case 16: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(48, 100); + case 1: + return SC27XX_VOLT_RATIO(480, 1955); + case 2: + return SC27XX_VOLT_RATIO(480, 2586); + case 3: + return SC27XX_VOLT_RATIO(48, 406); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + case 21: + case 22: + case 23: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(3, 8); + case 1: + return SC27XX_VOLT_RATIO(375, 1955); + case 2: + return SC27XX_VOLT_RATIO(375, 2586); + case 3: + return SC27XX_VOLT_RATIO(300, 3248); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + default: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(1, 1); + case 1: + return SC27XX_VOLT_RATIO(1000, 1955); + case 2: + return SC27XX_VOLT_RATIO(1000, 2586); + case 3: + return SC27XX_VOLT_RATIO(100, 406); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + } + return SC27XX_VOLT_RATIO(1, 1); +} + +static int sc2721_adc_get_ratio(int channel, int scale) +{ + switch (channel) { + case 1: + case 2: + case 3: + case 4: + return scale ? SC27XX_VOLT_RATIO(400, 1025) : + SC27XX_VOLT_RATIO(1, 1); + case 5: + return SC27XX_VOLT_RATIO(7, 29); + case 7: + case 9: + return scale ? SC27XX_VOLT_RATIO(100, 125) : + SC27XX_VOLT_RATIO(1, 1); + case 14: + return SC27XX_VOLT_RATIO(68, 900); + case 16: + return SC27XX_VOLT_RATIO(48, 100); + case 19: + return SC27XX_VOLT_RATIO(1, 3); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + return SC27XX_VOLT_RATIO(1, 1); +} + +static int sc2730_adc_get_ratio(int channel, int scale) +{ + switch (channel) { + case 14: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(68, 900); + case 1: + return SC27XX_VOLT_RATIO(68, 1760); + case 2: + return SC27XX_VOLT_RATIO(68, 2327); + case 3: + return SC27XX_VOLT_RATIO(68, 3654); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + case 15: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(1, 3); + case 1: + return SC27XX_VOLT_RATIO(1000, 5865); + case 2: + return SC27XX_VOLT_RATIO(500, 3879); + case 3: + return SC27XX_VOLT_RATIO(500, 6090); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + case 16: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(48, 100); + case 1: + return SC27XX_VOLT_RATIO(480, 1955); + case 2: + return SC27XX_VOLT_RATIO(480, 2586); + case 3: + return SC27XX_VOLT_RATIO(48, 406); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + case 21: + case 22: + case 23: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(3, 8); + case 1: + return SC27XX_VOLT_RATIO(375, 1955); + case 2: + return SC27XX_VOLT_RATIO(375, 2586); + case 3: + return SC27XX_VOLT_RATIO(300, 3248); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + default: + switch (scale) { + case 0: + return SC27XX_VOLT_RATIO(1, 1); + case 1: + return SC27XX_VOLT_RATIO(1000, 1955); + case 2: + return SC27XX_VOLT_RATIO(1000, 2586); + case 3: + return SC27XX_VOLT_RATIO(1000, 4060); + default: + return SC27XX_VOLT_RATIO(1, 1); + } + } + return SC27XX_VOLT_RATIO(1, 1); +} + +static int 
sc2731_adc_get_ratio(int channel, int scale)
 {
 	switch (channel) {
 	case 1:
@@ -185,10 +402,87 @@ static int sc27xx_adc_get_ratio(int channel, int scale)
 	return SC27XX_VOLT_RATIO(1, 1);
 }

+/*
+ * According to the datasheet, set specific scale values for some channels.
+ */
+static void sc2720_adc_scale_init(struct sc27xx_adc_data *data)
+{
+	int i;
+
+	for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+		switch (i) {
+		case 5:
+			data->channel_scale[i] = 3;
+			break;
+		case 7:
+		case 9:
+			data->channel_scale[i] = 2;
+			break;
+		case 13:
+			data->channel_scale[i] = 1;
+			break;
+		case 19:
+		case 30:
+		case 31:
+			data->channel_scale[i] = 3;
+			break;
+		default:
+			data->channel_scale[i] = 0;
+			break;
+		}
+	}
+}
+
+static void sc2730_adc_scale_init(struct sc27xx_adc_data *data)
+{
+	int i;
+
+	for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+		switch (i) {
+		case 5:
+		case 10:
+		case 19:
+		case 30:
+		case 31:
+			data->channel_scale[i] = 3;
+			break;
+		case 7:
+		case 9:
+			data->channel_scale[i] = 2;
+			break;
+		case 13:
+			data->channel_scale[i] = 1;
+			break;
+		default:
+			data->channel_scale[i] = 0;
+			break;
+		}
+	}
+}
+
+static void sc2731_adc_scale_init(struct sc27xx_adc_data *data)
+{
+	int i;
+	/*
+	 * In the current software design, the SC2731 supports 2 scales:
+	 * channel 5 uses the big scale, the others use the small scale.
+	 */
+	for (i = 0; i < SC27XX_ADC_CHANNEL_MAX; i++) {
+		switch (i) {
+		case 5:
+			data->channel_scale[i] = 1;
+			break;
+		default:
+			data->channel_scale[i] = 0;
+			break;
+		}
+	}
+}
+
 static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
 			   int scale, int *val)
 {
-	int ret;
+	int ret, ret_volref;
 	u32 tmp, value, status;

 	ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
@@ -197,10 +491,25 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
 		return ret;
 	}

+	/*
+	 * According to the sc2721 chip data sheet, the reference voltage of
+	 * channels 30 and 31 in the ADC module needs to be set from
+	 * the default 2.8v to 3.5v.
+ */ + if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) { + ret = regulator_set_voltage(data->volref, + SC27XX_ADC_REFVOL_VDD35, + SC27XX_ADC_REFVOL_VDD35); + if (ret) { + dev_err(data->dev, "failed to set the volref 3.5v\n"); + goto unlock_adc; + } + } + ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL, SC27XX_ADC_EN, SC27XX_ADC_EN); if (ret) - goto unlock_adc; + goto regulator_restore; ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR, SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR); @@ -208,10 +517,11 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel, goto disable_adc; /* Configure the channel id and scale */ - tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK; + tmp = (scale << data->var_data->scale_shift) & data->var_data->scale_mask; tmp |= channel & SC27XX_ADC_CHN_ID_MASK; ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CH_CFG, - SC27XX_ADC_CHN_ID_MASK | SC27XX_ADC_SCALE_MASK, + SC27XX_ADC_CHN_ID_MASK | + data->var_data->scale_mask, tmp); if (ret) goto disable_adc; @@ -249,6 +559,17 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel, disable_adc: regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL, SC27XX_ADC_EN, 0); +regulator_restore: + if ((data->var_data->set_volref) && (channel == 30 || channel == 31)) { + ret_volref = regulator_set_voltage(data->volref, + SC27XX_ADC_REFVOL_VDD28, + SC27XX_ADC_REFVOL_VDD28); + if (ret_volref) { + dev_err(data->dev, "failed to set the volref 2.8v, ret_volref = 0x%x\n", + ret_volref); + ret = ret || ret_volref; + } + } unlock_adc: hwspin_unlock_raw(data->hwlock); @@ -262,13 +583,14 @@ static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data, int channel, int scale, u32 *div_numerator, u32 *div_denominator) { - u32 ratio = sc27xx_adc_get_ratio(channel, scale); + u32 ratio; + ratio = data->var_data->get_ratio(channel, scale); *div_numerator = ratio >> SC27XX_RATIO_NUMERATOR_OFFSET; *div_denominator = ratio & SC27XX_RATIO_DENOMINATOR_MASK; } -static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph, +static int adc_to_volt(struct sc27xx_adc_linear_graph *graph, int raw_adc) { int tmp; @@ -277,6 +599,16 @@ static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph, tmp /= (graph->adc0 - graph->adc1); tmp += graph->volt1; + return tmp; +} + +static int sc27xx_adc_to_volt(struct sc27xx_adc_linear_graph *graph, + int raw_adc) +{ + int tmp; + + tmp = adc_to_volt(graph, raw_adc); + return tmp < 0 ?
0 : tmp; } @@ -432,13 +764,13 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data) { int ret; - ret = regmap_update_bits(data->regmap, SC27XX_MODULE_EN, + ret = regmap_update_bits(data->regmap, data->var_data->module_en, SC27XX_MODULE_ADC_EN, SC27XX_MODULE_ADC_EN); if (ret) return ret; /* Enable ADC work clock and controller clock */ - ret = regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN, + ret = regmap_update_bits(data->regmap, data->var_data->clk_en, SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN); if (ret) @@ -456,10 +788,10 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data) return 0; disable_clk: - regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN, + regmap_update_bits(data->regmap, data->var_data->clk_en, SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0); disable_adc: - regmap_update_bits(data->regmap, SC27XX_MODULE_EN, + regmap_update_bits(data->regmap, data->var_data->module_en, SC27XX_MODULE_ADC_EN, 0); return ret; @@ -470,21 +802,76 @@ static void sc27xx_adc_disable(void *_data) struct sc27xx_adc_data *data = _data; /* Disable ADC work clock and controller clock */ - regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN, + regmap_update_bits(data->regmap, data->var_data->clk_en, SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0); - regmap_update_bits(data->regmap, SC27XX_MODULE_EN, + regmap_update_bits(data->regmap, data->var_data->module_en, SC27XX_MODULE_ADC_EN, 0); } +static const struct sc27xx_adc_variant_data sc2731_data = { + .module_en = SC2731_MODULE_EN, + .clk_en = SC2731_ARM_CLK_EN, + .scale_shift = SC27XX_ADC_SCALE_SHIFT, + .scale_mask = SC27XX_ADC_SCALE_MASK, + .bscale_cal = &sc2731_big_scale_graph_calib, + .sscale_cal = &sc2731_small_scale_graph_calib, + .init_scale = sc2731_adc_scale_init, + .get_ratio = sc2731_adc_get_ratio, + .set_volref = false, +}; + +static const struct sc27xx_adc_variant_data sc2730_data = { + .module_en = SC2730_MODULE_EN, + .clk_en = SC2730_ARM_CLK_EN, + .scale_shift = SC27XX_ADC_SCALE_SHIFT, + .scale_mask = SC27XX_ADC_SCALE_MASK, + .bscale_cal = &big_scale_graph_calib, + .sscale_cal = &small_scale_graph_calib, + .init_scale = sc2730_adc_scale_init, + .get_ratio = sc2730_adc_get_ratio, + .set_volref = false, +}; + +static const struct sc27xx_adc_variant_data sc2721_data = { + .module_en = SC2731_MODULE_EN, + .clk_en = SC2721_ARM_CLK_EN, + .scale_shift = SC2721_ADC_SCALE_SHIFT, + .scale_mask = SC2721_ADC_SCALE_MASK, + .bscale_cal = &sc2731_big_scale_graph_calib, + .sscale_cal = &sc2731_small_scale_graph_calib, + .init_scale = sc2731_adc_scale_init, + .get_ratio = sc2721_adc_get_ratio, + .set_volref = true, +}; + +static const struct sc27xx_adc_variant_data sc2720_data = { + .module_en = SC2731_MODULE_EN, + .clk_en = SC2721_ARM_CLK_EN, + .scale_shift = SC27XX_ADC_SCALE_SHIFT, + .scale_mask = SC27XX_ADC_SCALE_MASK, + .bscale_cal = &big_scale_graph_calib, + .sscale_cal = &small_scale_graph_calib, + .init_scale = sc2720_adc_scale_init, + .get_ratio = sc2720_adc_get_ratio, + .set_volref = false, +}; + static int sc27xx_adc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct sc27xx_adc_data *sc27xx_data; + const struct sc27xx_adc_variant_data *pdata; struct iio_dev *indio_dev; int ret; + pdata = of_device_get_match_data(dev); + if (!pdata) { + dev_err(dev, "No matching driver data found\n"); + return -EINVAL; + } + indio_dev = devm_iio_device_alloc(dev, sizeof(*sc27xx_data)); if (!indio_dev) return -ENOMEM; @@ -520,6 +907,16 @@ static 
int sc27xx_adc_probe(struct platform_device *pdev) } sc27xx_data->dev = dev; + if (pdata->set_volref) { + sc27xx_data->volref = devm_regulator_get(dev, "vref"); + if (IS_ERR(sc27xx_data->volref)) { + ret = PTR_ERR(sc27xx_data->volref); + return dev_err_probe(dev, ret, "failed to get ADC volref\n"); + } + } + + sc27xx_data->var_data = pdata; + sc27xx_data->var_data->init_scale(sc27xx_data); ret = sc27xx_adc_enable(sc27xx_data); if (ret) { @@ -546,7 +943,10 @@ static int sc27xx_adc_probe(struct platform_device *pdev) } static const struct of_device_id sc27xx_adc_of_match[] = { - { .compatible = "sprd,sc2731-adc", }, + { .compatible = "sprd,sc2731-adc", .data = &sc2731_data}, + { .compatible = "sprd,sc2730-adc", .data = &sc2730_data}, + { .compatible = "sprd,sc2721-adc", .data = &sc2721_data}, + { .compatible = "sprd,sc2720-adc", .data = &sc2720_data}, { } }; MODULE_DEVICE_TABLE(of, sc27xx_adc_of_match); diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 9704cf0b9753..6d21ea84fa82 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -466,8 +466,7 @@ static int stm32_dfsdm_channels_configure(struct iio_dev *indio_dev, * In continuous mode, use fast mode configuration, * if it provides a better resolution. */ - if (adc->nconv == 1 && !trig && - (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE)) { + if (adc->nconv == 1 && !trig && iio_buffer_enabled(indio_dev)) { if (fl->flo[1].res >= fl->flo[0].res) { fl->fast = 1; flo = &fl->flo[1]; @@ -562,7 +561,7 @@ static int stm32_dfsdm_filter_configure(struct iio_dev *indio_dev, cr1 = DFSDM_CR1_RCH(chan->channel); /* Continuous conversions triggered by SPI clk in buffer mode */ - if (indio_dev->currentmode & INDIO_BUFFER_SOFTWARE) + if (iio_buffer_enabled(indio_dev)) cr1 |= DFSDM_CR1_RCONT(1); cr1 |= DFSDM_CR1_RSYNC(fl->sync_mode); diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c index d2d405388499..000e5cfecb43 100644 --- a/drivers/iio/adc/stmpe-adc.c +++ b/drivers/iio/adc/stmpe-adc.c @@ -61,7 +61,7 @@ struct stmpe_adc { static int stmpe_read_voltage(struct stmpe_adc *info, struct iio_chan_spec const *chan, int *val) { - long ret; + unsigned long ret; mutex_lock(&info->lock); @@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info, ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT); - if (ret <= 0) { + if (ret == 0) { stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA, STMPE_ADC_CH(info->channel)); mutex_unlock(&info->lock); @@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info, static int stmpe_read_temp(struct stmpe_adc *info, struct iio_chan_spec const *chan, int *val) { - long ret; + unsigned long ret; mutex_lock(&info->lock); @@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info, ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT); - if (ret <= 0) { + if (ret == 0) { mutex_unlock(&info->lock); return -ETIMEDOUT; } @@ -345,21 +345,22 @@ static int __maybe_unused stmpe_adc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume); +static const struct of_device_id stmpe_adc_ids[] = { + { .compatible = "st,stmpe-adc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, stmpe_adc_ids); + static struct platform_driver stmpe_adc_driver = { .probe = stmpe_adc_probe, .driver = { .name = "stmpe-adc", .pm = &stmpe_adc_pm_ops, + .of_match_table = stmpe_adc_ids, }, }; module_platform_driver(stmpe_adc_driver); -static const struct of_device_id 
stmpe_adc_ids[] = { - { .compatible = "st,stmpe-adc", }, - { }, -}; -MODULE_DEVICE_TABLE(of, stmpe_adc_ids); - MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>"); MODULE_DESCRIPTION("STMPEXXX ADC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 068efbce1710..5544da80b636 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -76,10 +76,15 @@ #define ADS1015_DEFAULT_DATA_RATE 4 #define ADS1015_DEFAULT_CHAN 0 -enum chip_ids { - ADSXXXX = 0, - ADS1015, - ADS1115, +struct ads1015_chip_data { + struct iio_chan_spec const *channels; + int num_channels; + const struct iio_info *info; + const int *data_rate; + const int data_rate_len; + const int *scale; + const int scale_len; + bool has_comparator; }; enum ads1015_channels { @@ -94,11 +99,11 @@ enum ads1015_channels { ADS1015_TIMESTAMP, }; -static const unsigned int ads1015_data_rate[] = { +static const int ads1015_data_rate[] = { 128, 250, 490, 920, 1600, 2400, 3300, 3300 }; -static const unsigned int ads1115_data_rate[] = { +static const int ads1115_data_rate[] = { 8, 16, 32, 64, 128, 250, 475, 860 }; @@ -106,10 +111,28 @@ static const unsigned int ads1115_data_rate[] = { * Translation from PGA bits to full-scale positive and negative input voltage * range in mV */ -static int ads1015_fullscale_range[] = { +static const int ads1015_fullscale_range[] = { 6144, 4096, 2048, 1024, 512, 256, 256, 256 }; +static const int ads1015_scale[] = { /* 12bit ADC */ + 256, 11, + 512, 11, + 1024, 11, + 2048, 11, + 4096, 11, + 6144, 11 +}; + +static const int ads1115_scale[] = { /* 16bit ADC */ + 256, 15, + 512, 15, + 1024, 15, + 2048, 15, + 4096, 15, + 6144, 15 +}; + /* * Translation from COMP_QUE field value to the number of successive readings * exceed the threshold values before an interrupt is generated @@ -134,71 +157,53 @@ static const struct iio_event_spec ads1015_events[] = { }, }; -#define ADS1015_V_CHAN(_chan, _addr) { \ - .type = IIO_VOLTAGE, \ - .indexed = 1, \ - .address = _addr, \ - .channel = _chan, \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - BIT(IIO_CHAN_INFO_SCALE) | \ - BIT(IIO_CHAN_INFO_SAMP_FREQ), \ - .scan_index = _addr, \ - .scan_type = { \ - .sign = 's', \ - .realbits = 12, \ - .storagebits = 16, \ - .shift = 4, \ - .endianness = IIO_CPU, \ - }, \ - .event_spec = ads1015_events, \ - .num_event_specs = ARRAY_SIZE(ads1015_events), \ - .datasheet_name = "AIN"#_chan, \ -} - -#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \ +/* + * Compile-time check whether _fitbits can accommodate up to _testbits + * bits. Returns _fitbits on success, fails to compile otherwise. + * + * The test works such that it multiplies constant _fitbits by constant + * double-negation of size of a non-empty structure, i.e. it multiplies + * constant _fitbits by constant 1 in each successful compilation case. + * The non-empty structure may contain C11 _Static_assert(), make use of + * this and place the kernel variant of static assert in there, so that + * it performs the compile-time check for _testbits <= _fitbits. Note + * that it is not possible to directly use static_assert in compound + * statements, hence this convoluted construct. 
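As a concrete illustration of the construct this comment describes, here is a standalone sketch (hypothetical, using C11 _Static_assert directly instead of the kernel's static_assert wrapper) that compiles with gcc or clang:

    #define FIT_CHECK(_testbits, _fitbits)                             \
            (                                                          \
                    (_fitbits) *                                       \
                    !!sizeof(struct {                                  \
                            _Static_assert((_testbits) <= (_fitbits),  \
                                           "value does not fit");      \
                            int pad;                                   \
                    })                                                 \
            )

    /* 12 real bits plus a shift of 4 fit in 16: evaluates to 16. */
    static const int ok_storagebits = FIT_CHECK(12 + 4, 16);

    /*
     * 16 real bits plus a shift of 4 do not fit in 16: the assertion
     * fires, so the next line would fail to compile if uncommented.
     *
     * static const int bad_storagebits = FIT_CHECK(16 + 4, 16);
     */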
+ */ +#define FIT_CHECK(_testbits, _fitbits) \ + ( \ + (_fitbits) * \ + !!sizeof(struct { \ + static_assert((_testbits) <= (_fitbits)); \ + int pad; \ + }) \ + ) + +#define ADS1015_V_CHAN(_chan, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \ .type = IIO_VOLTAGE, \ - .differential = 1, \ .indexed = 1, \ .address = _addr, \ .channel = _chan, \ - .channel2 = _chan2, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ - .scan_index = _addr, \ - .scan_type = { \ - .sign = 's', \ - .realbits = 12, \ - .storagebits = 16, \ - .shift = 4, \ - .endianness = IIO_CPU, \ - }, \ - .event_spec = ads1015_events, \ - .num_event_specs = ARRAY_SIZE(ads1015_events), \ - .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \ -} - -#define ADS1115_V_CHAN(_chan, _addr) { \ - .type = IIO_VOLTAGE, \ - .indexed = 1, \ - .address = _addr, \ - .channel = _chan, \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + .info_mask_shared_by_all_available = \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = _addr, \ .scan_type = { \ .sign = 's', \ - .realbits = 16, \ - .storagebits = 16, \ + .realbits = (_realbits), \ + .storagebits = FIT_CHECK((_realbits) + (_shift), 16), \ + .shift = (_shift), \ .endianness = IIO_CPU, \ }, \ - .event_spec = ads1015_events, \ - .num_event_specs = ARRAY_SIZE(ads1015_events), \ + .event_spec = (_event_spec), \ + .num_event_specs = (_num_event_specs), \ .datasheet_name = "AIN"#_chan, \ } -#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \ +#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr, _realbits, _shift, _event_spec, _num_event_specs) { \ .type = IIO_VOLTAGE, \ .differential = 1, \ .indexed = 1, \ @@ -208,15 +213,19 @@ static const struct iio_event_spec ads1015_events[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .info_mask_shared_by_all_available = \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = _addr, \ .scan_type = { \ .sign = 's', \ - .realbits = 16, \ - .storagebits = 16, \ + .realbits = (_realbits), \ + .storagebits = FIT_CHECK((_realbits) + (_shift), 16), \ + .shift = (_shift), \ .endianness = IIO_CPU, \ }, \ - .event_spec = ads1015_events, \ - .num_event_specs = ARRAY_SIZE(ads1015_events), \ + .event_spec = (_event_spec), \ + .num_event_specs = (_num_event_specs), \ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \ } @@ -245,7 +254,7 @@ struct ads1015_data { unsigned int comp_mode; struct ads1015_thresh_data thresh_data[ADS1015_CHANNELS]; - unsigned int *data_rate; + const struct ads1015_chip_data *chip; /* * Set to true when the ADC is switched to the continuous-conversion * mode and exits from a power-down state. 
This flag is used to avoid @@ -273,49 +282,91 @@ static void ads1015_event_channel_disable(struct ads1015_data *data, int chan) data->event_channel = ADS1015_CHANNELS; } -static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg) -{ - switch (reg) { - case ADS1015_CFG_REG: - case ADS1015_LO_THRESH_REG: - case ADS1015_HI_THRESH_REG: - return true; - default: - return false; - } -} +static const struct regmap_range ads1015_writeable_ranges[] = { + regmap_reg_range(ADS1015_CFG_REG, ADS1015_HI_THRESH_REG), +}; + +static const struct regmap_access_table ads1015_writeable_table = { + .yes_ranges = ads1015_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(ads1015_writeable_ranges), +}; static const struct regmap_config ads1015_regmap_config = { .reg_bits = 8, .val_bits = 16, .max_register = ADS1015_HI_THRESH_REG, - .writeable_reg = ads1015_is_writeable_reg, + .wr_table = &ads1015_writeable_table, +}; + +static const struct regmap_range tla2024_writeable_ranges[] = { + regmap_reg_range(ADS1015_CFG_REG, ADS1015_CFG_REG), +}; + +static const struct regmap_access_table tla2024_writeable_table = { + .yes_ranges = tla2024_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(tla2024_writeable_ranges), +}; + +static const struct regmap_config tla2024_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = ADS1015_CFG_REG, + .wr_table = &tla2024_writeable_table, }; static const struct iio_chan_spec ads1015_channels[] = { - ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1), - ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3), - ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3), - ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3), - ADS1015_V_CHAN(0, ADS1015_AIN0), - ADS1015_V_CHAN(1, ADS1015_AIN1), - ADS1015_V_CHAN(2, ADS1015_AIN2), - ADS1015_V_CHAN(3, ADS1015_AIN3), + ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4, + ads1015_events, ARRAY_SIZE(ads1015_events)), IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; static const struct iio_chan_spec ads1115_channels[] = { - ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1), - ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3), - ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3), - ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3), - ADS1115_V_CHAN(0, ADS1015_AIN0), - ADS1115_V_CHAN(1, ADS1015_AIN1), - ADS1115_V_CHAN(2, ADS1015_AIN2), - ADS1115_V_CHAN(3, ADS1015_AIN3), + ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(0, ADS1015_AIN0, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(1, ADS1015_AIN1, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), 
+ ADS1015_V_CHAN(2, ADS1015_AIN2, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), + ADS1015_V_CHAN(3, ADS1015_AIN3, 16, 0, + ads1015_events, ARRAY_SIZE(ads1015_events)), IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +static const struct iio_chan_spec tla2024_channels[] = { + ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1, 12, 4, NULL, 0), + ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3, 12, 4, NULL, 0), + ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3, 12, 4, NULL, 0), + ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3, 12, 4, NULL, 0), + ADS1015_V_CHAN(0, ADS1015_AIN0, 12, 4, NULL, 0), + ADS1015_V_CHAN(1, ADS1015_AIN1, 12, 4, NULL, 0), + ADS1015_V_CHAN(2, ADS1015_AIN2, 12, 4, NULL, 0), + ADS1015_V_CHAN(3, ADS1015_AIN3, 12, 4, NULL, 0), + IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), +}; + + #ifdef CONFIG_PM static int ads1015_set_power_state(struct ads1015_data *data, bool on) { @@ -344,6 +395,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { + const int *data_rate = data->chip->data_rate; int ret, pga, dr, dr_old, conv_time; unsigned int old, mask, cfg; @@ -378,8 +430,8 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) } if (data->conv_invalid) { dr_old = (old & ADS1015_CFG_DR_MASK) >> ADS1015_CFG_DR_SHIFT; - conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]); - conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); + conv_time = DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr_old]); + conv_time += DIV_ROUND_UP(USEC_PER_SEC, data_rate[dr]); conv_time += conv_time / 10; /* 10% internal clock inaccuracy */ usleep_range(conv_time, conv_time + 1); data->conv_invalid = false; @@ -445,8 +497,8 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate) { int i; - for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) { - if (data->data_rate[i] == rate) { + for (i = 0; i < data->chip->data_rate_len; i++) { + if (data->chip->data_rate[i] == rate) { data->channel_data[chan].data_rate = i; return 0; } @@ -455,6 +507,32 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate) return -EINVAL; } +static int ads1015_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + struct ads1015_data *data = iio_priv(indio_dev); + + if (chan->type != IIO_VOLTAGE) + return -EINVAL; + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *type = IIO_VAL_FRACTIONAL_LOG2; + *vals = data->chip->scale; + *length = data->chip->scale_len; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SAMP_FREQ: + *type = IIO_VAL_INT; + *vals = data->chip->data_rate; + *length = data->chip->data_rate_len; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + static int ads1015_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -504,7 +582,7 @@ release_direct: break; case IIO_CHAN_INFO_SAMP_FREQ: idx = data->channel_data[chan->address].data_rate; - *val = data->data_rate[idx]; + *val = data->chip->data_rate[idx]; ret = IIO_VAL_INT; break; default: @@ -564,7 +642,7 @@ static int ads1015_read_event(struct iio_dev *indio_dev, dr = data->channel_data[chan->address].data_rate; comp_queue = data->thresh_data[chan->address].comp_queue; period = ads1015_comp_queue[comp_queue] * - USEC_PER_SEC / data->data_rate[dr]; + USEC_PER_SEC / data->chip->data_rate[dr]; *val = period / USEC_PER_SEC; *val2 = period % USEC_PER_SEC; @@ -586,6 
+664,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev, int val2) { struct ads1015_data *data = iio_priv(indio_dev); + const int *data_rate = data->chip->data_rate; int realbits = chan->scan_type.realbits; int ret = 0; long long period; @@ -611,7 +690,7 @@ static int ads1015_write_event(struct iio_dev *indio_dev, for (i = 0; i < ARRAY_SIZE(ads1015_comp_queue) - 1; i++) { if (period <= ads1015_comp_queue[i] * - USEC_PER_SEC / data->data_rate[dr]) + USEC_PER_SEC / data_rate[dr]) break; } data->thresh_data[chan->address].comp_queue = i; @@ -802,54 +881,20 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { .validate_scan_mask = &iio_validate_scan_mask_onehot, }; -static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available, - "3 2 1 0.5 0.25 0.125"); -static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available, - "0.1875 0.125 0.0625 0.03125 0.015625 0.007813"); - -static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available, - sampling_frequency_available, "128 250 490 920 1600 2400 3300"); -static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available, - sampling_frequency_available, "8 16 32 64 128 250 475 860"); - -static struct attribute *ads1015_attributes[] = { - &iio_const_attr_ads1015_scale_available.dev_attr.attr, - &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr, - NULL, -}; - -static const struct attribute_group ads1015_attribute_group = { - .attrs = ads1015_attributes, -}; - -static struct attribute *ads1115_attributes[] = { - &iio_const_attr_ads1115_scale_available.dev_attr.attr, - &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr, - NULL, -}; - -static const struct attribute_group ads1115_attribute_group = { - .attrs = ads1115_attributes, -}; - static const struct iio_info ads1015_info = { + .read_avail = ads1015_read_avail, .read_raw = ads1015_read_raw, .write_raw = ads1015_write_raw, .read_event_value = ads1015_read_event, .write_event_value = ads1015_write_event, .read_event_config = ads1015_read_event_config, .write_event_config = ads1015_write_event_config, - .attrs = &ads1015_attribute_group, }; -static const struct iio_info ads1115_info = { +static const struct iio_info tla2024_info = { + .read_avail = ads1015_read_avail, .read_raw = ads1015_read_raw, .write_raw = ads1015_write_raw, - .read_event_value = ads1015_read_event, - .write_event_value = ads1015_write_event, - .read_event_config = ads1015_read_event_config, - .write_event_config = ads1015_write_event_config, - .attrs = &ads1115_attribute_group, }; static int ads1015_client_get_channels_config(struct i2c_client *client) @@ -932,12 +977,18 @@ static int ads1015_set_conv_mode(struct ads1015_data *data, int mode) static int ads1015_probe(struct i2c_client *client, const struct i2c_device_id *id) { + const struct ads1015_chip_data *chip; struct iio_dev *indio_dev; struct ads1015_data *data; int ret; - enum chip_ids chip; int i; + chip = device_get_match_data(&client->dev); + if (!chip) + chip = (const struct ads1015_chip_data *)id->driver_data; + if (!chip) + return dev_err_probe(&client->dev, -EINVAL, "Unknown chip\n"); + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; @@ -950,28 +1001,12 @@ static int ads1015_probe(struct i2c_client *client, indio_dev->name = ADS1015_DRV_NAME; indio_dev->modes = INDIO_DIRECT_MODE; - chip = (uintptr_t)device_get_match_data(&client->dev); - if (chip == ADSXXXX) - chip = id->driver_data; - switch (chip) { - case ADS1015: - indio_dev->channels = 
ads1015_channels; - indio_dev->num_channels = ARRAY_SIZE(ads1015_channels); - indio_dev->info = &ads1015_info; - data->data_rate = (unsigned int *) &ads1015_data_rate; - break; - case ADS1115: - indio_dev->channels = ads1115_channels; - indio_dev->num_channels = ARRAY_SIZE(ads1115_channels); - indio_dev->info = &ads1115_info; - data->data_rate = (unsigned int *) &ads1115_data_rate; - break; - default: - dev_err(&client->dev, "Unknown chip %d\n", chip); - return -EINVAL; - } - + indio_dev->channels = chip->channels; + indio_dev->num_channels = chip->num_channels; + indio_dev->info = chip->info; + data->chip = chip; data->event_channel = ADS1015_CHANNELS; + /* * Set default lower and upper threshold to min and max value * respectively. @@ -986,7 +1021,9 @@ static int ads1015_probe(struct i2c_client *client, /* we need to keep this ABI the same as used by hwmon ADS1015 driver */ ads1015_get_channels_config(client); - data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config); + data->regmap = devm_regmap_init_i2c(client, chip->has_comparator ? + &ads1015_regmap_config : + &tla2024_regmap_config); if (IS_ERR(data->regmap)) { dev_err(&client->dev, "Failed to allocate register map\n"); return PTR_ERR(data->regmap); @@ -1000,7 +1037,7 @@ static int ads1015_probe(struct i2c_client *client, return ret; } - if (client->irq) { + if (client->irq && chip->has_comparator) { unsigned long irq_trig = irqd_get_trigger_type(irq_get_irq_data(client->irq)); unsigned int cfg_comp_mask = ADS1015_CFG_COMP_QUE_MASK | @@ -1099,22 +1136,51 @@ static const struct dev_pm_ops ads1015_pm_ops = { ads1015_runtime_resume, NULL) }; +static const struct ads1015_chip_data ads1015_data = { + .channels = ads1015_channels, + .num_channels = ARRAY_SIZE(ads1015_channels), + .info = &ads1015_info, + .data_rate = ads1015_data_rate, + .data_rate_len = ARRAY_SIZE(ads1015_data_rate), + .scale = ads1015_scale, + .scale_len = ARRAY_SIZE(ads1015_scale), + .has_comparator = true, +}; + +static const struct ads1015_chip_data ads1115_data = { + .channels = ads1115_channels, + .num_channels = ARRAY_SIZE(ads1115_channels), + .info = &ads1015_info, + .data_rate = ads1115_data_rate, + .data_rate_len = ARRAY_SIZE(ads1115_data_rate), + .scale = ads1115_scale, + .scale_len = ARRAY_SIZE(ads1115_scale), + .has_comparator = true, +}; + +static const struct ads1015_chip_data tla2024_data = { + .channels = tla2024_channels, + .num_channels = ARRAY_SIZE(tla2024_channels), + .info = &tla2024_info, + .data_rate = ads1015_data_rate, + .data_rate_len = ARRAY_SIZE(ads1015_data_rate), + .scale = ads1015_scale, + .scale_len = ARRAY_SIZE(ads1015_scale), + .has_comparator = false, +}; + static const struct i2c_device_id ads1015_id[] = { - {"ads1015", ADS1015}, - {"ads1115", ADS1115}, + { "ads1015", (kernel_ulong_t)&ads1015_data }, + { "ads1115", (kernel_ulong_t)&ads1115_data }, + { "tla2024", (kernel_ulong_t)&tla2024_data }, {} }; MODULE_DEVICE_TABLE(i2c, ads1015_id); static const struct of_device_id ads1015_of_match[] = { - { - .compatible = "ti,ads1015", - .data = (void *)ADS1015 - }, - { - .compatible = "ti,ads1115", - .data = (void *)ADS1115 - }, + { .compatible = "ti,ads1015", .data = &ads1015_data }, + { .compatible = "ti,ads1115", .data = &ads1115_data }, + { .compatible = "ti,tla2024", .data = &tla2024_data }, {} }; MODULE_DEVICE_TABLE(of, ads1015_of_match); diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 22c2583eedd0..708cca0a63be 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ 
-508,6 +508,7 @@ MODULE_DEVICE_TABLE(of, ads8688_of_match); static struct spi_driver ads8688_driver = { .driver = { .name = "ads8688", + .of_match_table = ads8688_of_match, }, .probe = ads8688_probe, .remove = ads8688_remove, diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index dbdc1ef48566..567d43a30955 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -376,9 +376,7 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev, { int ret; - ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, - setup_ops); + ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops); if (ret) return ret; diff --git a/drivers/iio/afe/Kconfig b/drivers/iio/afe/Kconfig index 4fa397822cff..9a1d95c1c7ed 100644 --- a/drivers/iio/afe/Kconfig +++ b/drivers/iio/afe/Kconfig @@ -8,7 +8,6 @@ menu "Analog Front Ends" config IIO_RESCALE tristate "IIO rescale" - depends on OF || COMPILE_TEST help Say yes here to build support for the IIO rescaling that handles voltage dividers, current sense shunts and diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index 7e511293d6d1..c6cf709f0f05 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -10,9 +10,8 @@ #include <linux/err.h> #include <linux/gcd.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/property.h> @@ -536,7 +535,7 @@ static int rescale_probe(struct platform_device *pdev) rescale = iio_priv(indio_dev); - rescale->cfg = of_device_get_match_data(dev); + rescale->cfg = device_get_match_data(dev); rescale->numerator = 1; rescale->denominator = 1; rescale->offset = 0; diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 416d35a61ae2..35d8b4077376 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c @@ -259,8 +259,6 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev) * devm_iio_kfifo_buffer_setup_ext - Allocate a kfifo buffer & attach it to an IIO device * @dev: Device object to which to attach the life-time of this kfifo buffer * @indio_dev: The device the buffer should be attached to - * @mode_flags: The mode flags for this buffer (INDIO_BUFFER_SOFTWARE and/or - * INDIO_BUFFER_TRIGGERED). 
* @setup_ops: The setup_ops required to configure the HW part of the buffer (optional) * @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer * @@ -271,22 +269,16 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev) */ int devm_iio_kfifo_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, - int mode_flags, const struct iio_buffer_setup_ops *setup_ops, const struct attribute **buffer_attrs) { struct iio_buffer *buffer; - if (!mode_flags) - return -EINVAL; - buffer = devm_iio_kfifo_allocate(dev); if (!buffer) return -ENOMEM; - mode_flags &= kfifo_access_funcs.modes; - - indio_dev->modes |= mode_flags; + indio_dev->modes |= INDIO_BUFFER_SOFTWARE; indio_dev->setup_ops = setup_ops; buffer->attrs = buffer_attrs; diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index b2725c6adc7f..5976aca48e3b 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -333,8 +333,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, * We can not use trigger here, as events are generated * as soon as sample_frequency is set. */ - ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, NULL, + ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, NULL, cros_ec_sensor_fifo_attributes); if (ret) return ret; @@ -413,7 +412,7 @@ static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev, int ret, i; bool calibrate; - ret = strtobool(buf, &calibrate); + ret = kstrtobool(buf, &calibrate); if (ret < 0) return ret; if (!calibrate) diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c index d538bf3ab1ef..793d628db55f 100644 --- a/drivers/iio/common/scmi_sensors/scmi_iio.c +++ b/drivers/iio/common/scmi_sensors/scmi_iio.c @@ -686,7 +686,6 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev) err = devm_iio_kfifo_buffer_setup(&scmi_iio_dev->dev, scmi_iio_dev, - INDIO_BUFFER_SOFTWARE, &scmi_iio_buffer_ops); if (err < 0) { dev_err(dev, diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c index 769bd9280524..f32b04b63ea1 100644 --- a/drivers/iio/common/ssp_sensors/ssp_spi.c +++ b/drivers/iio/common/ssp_sensors/ssp_spi.c @@ -331,12 +331,11 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len) /* threaded irq */ int ssp_irq_msg(struct ssp_data *data) { - bool found = false; char *buffer; u8 msg_type; int ret; u16 length, msg_options; - struct ssp_msg *msg, *n; + struct ssp_msg *msg = NULL, *iter, *n; ret = spi_read(data->spi, data->header_buffer, SSP_HEADER_BUFFER_SIZE); if (ret < 0) { @@ -362,15 +361,15 @@ int ssp_irq_msg(struct ssp_data *data) * received with no order */ mutex_lock(&data->pending_lock); - list_for_each_entry_safe(msg, n, &data->pending_list, list) { - if (msg->options == msg_options) { - list_del(&msg->list); - found = true; + list_for_each_entry_safe(iter, n, &data->pending_list, list) { + if (iter->options == msg_options) { + list_del(&iter->list); + msg = iter; break; } } - if (!found) { + if (!msg) { /* * here can be implemented dead messages handling * but the slave should not send such ones - it is to diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index fa9bcdf0d190..9910ba1da085 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ 
b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -71,16 +71,18 @@ st_sensors_match_odr_error: int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) { - int err; + int err = 0; struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); + mutex_lock(&sdata->odr_lock); + if (!sdata->sensor_settings->odr.mask) - return 0; + goto unlock_mutex; err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); if (err < 0) - goto st_sensors_match_odr_error; + goto unlock_mutex; if ((sdata->sensor_settings->odr.addr == sdata->sensor_settings->pw.addr) && @@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) if (err >= 0) sdata->odr = odr_out.hz; -st_sensors_match_odr_error: +unlock_mutex: + mutex_unlock(&sdata->odr_lock); + return err; } EXPORT_SYMBOL_NS(st_sensors_set_odr, IIO_ST_SENSORS); @@ -361,6 +365,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, struct st_sensors_platform_data *of_pdata; int err = 0; + mutex_init(&sdata->odr_lock); + /* If OF/DT pdata exists, it will take precedence of anything else */ of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata); if (IS_ERR(of_pdata)) @@ -549,26 +555,28 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, int err; struct st_sensor_data *sdata = iio_priv(indio_dev); - mutex_lock(&indio_dev->mlock); - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { - err = -EBUSY; + err = iio_device_claim_direct_mode(indio_dev); + if (err) + return err; + + mutex_lock(&sdata->odr_lock); + + err = st_sensors_set_enable(indio_dev, true); + if (err < 0) goto out; - } else { - err = st_sensors_set_enable(indio_dev, true); - if (err < 0) - goto out; - msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr); - err = st_sensors_read_axis_data(indio_dev, ch, val); - if (err < 0) - goto out; + msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr); + err = st_sensors_read_axis_data(indio_dev, ch, val); + if (err < 0) + goto out; - *val = *val >> ch->scan_type.shift; + *val = *val >> ch->scan_type.shift; + + err = st_sensors_set_enable(indio_dev, false); - err = st_sensors_set_enable(indio_dev, false); - } out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&sdata->odr_lock); + iio_device_release_direct_mode(indio_dev); return err; } @@ -641,7 +649,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct st_sensor_data *sdata = iio_priv(indio_dev); - mutex_lock(&indio_dev->mlock); for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) { if (sdata->sensor_settings->odr.odr_avl[i].hz == 0) break; @@ -649,7 +656,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", sdata->sensor_settings->odr.odr_avl[i].hz); } - mutex_unlock(&indio_dev->mlock); buf[len - 1] = '\n'; return len; @@ -663,7 +669,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct st_sensor_data *sdata = iio_priv(indio_dev); - mutex_lock(&indio_dev->mlock); for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) { if (sdata->sensor_settings->fs.fs_avl[i].num == 0) break; @@ -673,7 +678,6 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r); } - mutex_unlock(&indio_dev->mlock); buf[len - 1] = '\n'; return len; diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index c0bf0d84197f..d1c7bde8aece 100644 --- 
a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -285,7 +285,6 @@ config CIO_DAC config DPOT_DAC tristate "DAC emulation using a DPOT" - depends on OF help Say yes here to build support for DAC emulation using a digital potentiometer. @@ -305,7 +304,7 @@ config DS4424 config LPC18XX_DAC tristate "NXP LPC18xx DAC driver" depends on ARCH_LPC18XX || COMPILE_TEST - depends on OF && HAS_IOMEM + depends on HAS_IOMEM help Say yes here to build support for NXP LPC18XX DAC. @@ -442,7 +441,6 @@ config TI_DAC7612 config VF610_DAC tristate "Vybrid vf610 DAC driver" - depends on OF depends on HAS_IOMEM help Say yes here to support Vybrid board digital-to-analog converter. diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 27ee2c63c5d4..d87cf14daabe 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -288,7 +288,7 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev, bool pwr_down; int ret; - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index ecbc6a51d60f..22b000a40828 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -284,7 +284,7 @@ static ssize_t ad5360_write_dac_powerdown(struct device *dev, bool pwr_down; int ret; - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c index 82e1d9bd773e..a44c83242fb1 100644 --- a/drivers/iio/dac/ad5380.c +++ b/drivers/iio/dac/ad5380.c @@ -96,7 +96,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev, bool pwr_down; int ret; - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index fdf824041497..09e242949cd0 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -114,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev, bool powerdown; int ret; - ret = strtobool(buf, &powerdown); + ret = kstrtobool(buf, &powerdown); if (ret) return ret; diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index 8507573aa13e..a0817e799cc0 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -182,7 +182,7 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, int ret; struct ad5504_state *st = iio_priv(indio_dev); - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 371e812850eb..7e6f824de299 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -129,7 +129,7 @@ static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev, int ret; struct ad5624r_state *st = iio_priv(indio_dev); - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index f78dd3f33199..15361d8bbf94 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -73,7 +73,7 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev, unsigned int val, ref_bit_msk; u8 shift, address = 0; - ret = strtobool(buf, &readin); + ret = kstrtobool(buf, &readin); if (ret) return ret; diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c index 7a62e6e1d5f1..1a63b8456725 100644 --- a/drivers/iio/dac/ad5755.c +++ b/drivers/iio/dac/ad5755.c 
@@ -502,7 +502,7 @@ static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv, bool pwr_down; int ret; - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c index 2b14914b4050..339564fe47d1 100644 --- a/drivers/iio/dac/ad5791.c +++ b/drivers/iio/dac/ad5791.c @@ -188,7 +188,7 @@ static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev, int ret; struct ad5791_state *st = iio_priv(indio_dev); - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c index 91eaaf793b3e..03edf046dec6 100644 --- a/drivers/iio/dac/ad7303.c +++ b/drivers/iio/dac/ad7303.c @@ -77,7 +77,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev, bool pwr_down; int ret; - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c index aed46c80757e..3a3c4f4874e4 100644 --- a/drivers/iio/dac/ltc2632.c +++ b/drivers/iio/dac/ltc2632.c @@ -10,6 +10,7 @@ #include <linux/spi/spi.h> #include <linux/module.h> #include <linux/iio/iio.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <asm/unaligned.h> @@ -149,7 +150,7 @@ static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev, int ret; struct ltc2632_state *st = iio_priv(indio_dev); - ret = strtobool(buf, &pwr_down); + ret = kstrtobool(buf, &pwr_down); if (ret) return ret; @@ -362,8 +363,7 @@ static int ltc2632_probe(struct spi_device *spi) } } - indio_dev->name = dev_of_node(&spi->dev) ? dev_of_node(&spi->dev)->name - : spi_get_device_id(spi)->name; + indio_dev->name = fwnode_get_name(dev_fwnode(&spi->dev)) ?: spi_get_device_id(spi)->name; indio_dev->info = <c2632_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = chip_info->channels; @@ -469,7 +469,7 @@ MODULE_DEVICE_TABLE(of, ltc2632_of_match); static struct spi_driver ltc2632_driver = { .driver = { .name = "ltc2632", - .of_match_table = of_match_ptr(ltc2632_of_match), + .of_match_table = ltc2632_of_match, }, .probe = ltc2632_probe, .remove = ltc2632_remove, diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c index 2f9c384885f4..937b0d25a11c 100644 --- a/drivers/iio/dac/ltc2688.c +++ b/drivers/iio/dac/ltc2688.c @@ -703,21 +703,20 @@ static int ltc2688_tgp_clk_setup(struct ltc2688_state *st, struct ltc2688_chan *chan, struct fwnode_handle *node, int tgp) { + struct device *dev = &st->spi->dev; unsigned long rate; struct clk *clk; int ret, f; - clk = devm_get_clk_from_child(&st->spi->dev, to_of_node(node), NULL); + clk = devm_get_clk_from_child(dev, to_of_node(node), NULL); if (IS_ERR(clk)) - return dev_err_probe(&st->spi->dev, PTR_ERR(clk), - "failed to get tgp clk.\n"); + return dev_err_probe(dev, PTR_ERR(clk), "failed to get tgp clk.\n"); ret = clk_prepare_enable(clk); if (ret) - return dev_err_probe(&st->spi->dev, ret, - "failed to enable tgp clk.\n"); + return dev_err_probe(dev, ret, "failed to enable tgp clk.\n"); - ret = devm_add_action_or_reset(&st->spi->dev, ltc2688_clk_disable, clk); + ret = devm_add_action_or_reset(dev, ltc2688_clk_disable, clk); if (ret) return ret; @@ -858,6 +857,7 @@ static int ltc2688_channel_config(struct ltc2688_state *st) static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref) { + struct device *dev = &st->spi->dev; struct gpio_desc *gpio; int ret; @@ -865,10 
+865,9 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref) * If we have a reset pin, use that to reset the board, If not, use * the reset bit. */ - gpio = devm_gpiod_get_optional(&st->spi->dev, "clr", GPIOD_OUT_HIGH); + gpio = devm_gpiod_get_optional(dev, "clr", GPIOD_OUT_HIGH); if (IS_ERR(gpio)) - return dev_err_probe(&st->spi->dev, PTR_ERR(gpio), - "Failed to get reset gpio"); + return dev_err_probe(dev, PTR_ERR(gpio), "Failed to get reset gpio"); if (gpio) { usleep_range(1000, 1200); /* bring device out of reset */ @@ -887,7 +886,7 @@ static int ltc2688_setup(struct ltc2688_state *st, struct regulator *vref) * Duplicate the default channel configuration as it can change during * @ltc2688_channel_config() */ - st->iio_chan = devm_kmemdup(&st->spi->dev, ltc2688_channels, + st->iio_chan = devm_kmemdup(dev, ltc2688_channels, sizeof(ltc2688_channels), GFP_KERNEL); if (!st->iio_chan) return -ENOMEM; diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c index fce640b7f1c8..540f9ea7cada 100644 --- a/drivers/iio/dac/max5821.c +++ b/drivers/iio/dac/max5821.c @@ -116,7 +116,7 @@ static ssize_t max5821_write_dac_powerdown(struct iio_dev *indio_dev, bool powerdown; int ret; - ret = strtobool(buf, &powerdown); + ret = kstrtobool(buf, &powerdown); if (ret) return ret; diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 842bad57cb88..7fcb86288823 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -80,7 +80,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev, bool state; int ret; - ret = strtobool(buf, &state); + ret = kstrtobool(buf, &state); if (ret < 0) return ret; @@ -178,7 +178,7 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev, bool state; int ret; - ret = strtobool(buf, &state); + ret = kstrtobool(buf, &state); if (ret) return ret; diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c index b20192a071cb..daa42bcbae83 100644 --- a/drivers/iio/dac/stm32-dac.c +++ b/drivers/iio/dac/stm32-dac.c @@ -220,7 +220,7 @@ static ssize_t stm32_dac_write_powerdown(struct iio_dev *indio_dev, bool powerdown; int ret; - ret = strtobool(buf, &powerdown); + ret = kstrtobool(buf, &powerdown); if (ret) return ret; diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c index 4e1156e6deb2..106ce3546419 100644 --- a/drivers/iio/dac/ti-dac082s085.c +++ b/drivers/iio/dac/ti-dac082s085.c @@ -133,7 +133,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev, bool powerdown; int ret; - ret = strtobool(buf, &powerdown); + ret = kstrtobool(buf, &powerdown); if (ret) return ret; diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c index 0b775f943db3..4b6b04038e94 100644 --- a/drivers/iio/dac/ti-dac5571.c +++ b/drivers/iio/dac/ti-dac5571.c @@ -179,7 +179,7 @@ static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev, bool powerdown; int ret; - ret = strtobool(buf, &powerdown); + ret = kstrtobool(buf, &powerdown); if (ret) return ret; diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c index e10d17e60ed3..4afc411725d9 100644 --- a/drivers/iio/dac/ti-dac7311.c +++ b/drivers/iio/dac/ti-dac7311.c @@ -123,7 +123,7 @@ static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev, u8 power; int ret; - ret = strtobool(buf, &powerdown); + ret = kstrtobool(buf, &powerdown); if (ret) return ret; diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c index c0b7ef900735..c24f609c2ade 100644 --- 
a/drivers/iio/dummy/iio_simple_dummy.c +++ b/drivers/iio/dummy/iio_simple_dummy.c @@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) */ swd = kzalloc(sizeof(*swd), GFP_KERNEL); - if (!swd) { - ret = -ENOMEM; - goto error_kzalloc; - } + if (!swd) + return ERR_PTR(-ENOMEM); + /* * Allocate an IIO device. * @@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) indio_dev = iio_device_alloc(parent, sizeof(*st)); if (!indio_dev) { ret = -ENOMEM; - goto error_ret; + goto error_free_swd; } st = iio_priv(indio_dev); @@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) * indio_dev->name = spi_get_device_id(spi)->name; */ indio_dev->name = kstrdup(name, GFP_KERNEL); + if (!indio_dev->name) { + ret = -ENOMEM; + goto error_free_device; + } /* Provide description of available channels */ indio_dev->channels = iio_dummy_channels; @@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) ret = iio_simple_dummy_events_register(indio_dev); if (ret < 0) - goto error_free_device; + goto error_free_name; ret = iio_simple_dummy_configure_buffer(indio_dev); if (ret < 0) @@ -649,11 +652,12 @@ error_unconfigure_buffer: iio_simple_dummy_unconfigure_buffer(indio_dev); error_unregister_events: iio_simple_dummy_events_unregister(indio_dev); +error_free_name: + kfree(indio_dev->name); error_free_device: iio_device_free(indio_dev); -error_ret: +error_free_swd: kfree(swd); -error_kzalloc: return ERR_PTR(ret); } diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c index a0f92c336fc4..942870539268 100644 --- a/drivers/iio/frequency/ad9523.c +++ b/drivers/iio/frequency/ad9523.c @@ -516,7 +516,7 @@ static ssize_t ad9523_store(struct device *dev, bool state; int ret; - ret = strtobool(buf, &state); + ret = kstrtobool(buf, &state); if (ret < 0) return ret; diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c index 410e5e9f2672..0923fd793492 100644 --- a/drivers/iio/gyro/fxas21002c_core.c +++ b/drivers/iio/gyro/fxas21002c_core.c @@ -7,9 +7,9 @@ #include <linux/interrupt.h> #include <linux/module.h> -#include <linux/of_irq.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> @@ -822,7 +822,6 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data) { struct device *dev = regmap_get_device(data->regmap); struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct device_node *np = indio_dev->dev.of_node; unsigned long irq_trig; bool irq_open_drain; int irq1; @@ -831,8 +830,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data) if (!data->irq) return 0; - irq1 = of_irq_get_byname(np, "INT1"); - + irq1 = fwnode_irq_get_byname(dev_fwnode(dev), "INT1"); if (irq1 == data->irq) { dev_info(dev, "using interrupt line INT1\n"); ret = regmap_field_write(data->regmap_fields[F_INT_CFG_DRDY], @@ -843,7 +841,7 @@ static int fxas21002c_trigger_probe(struct fxas21002c_data *data) dev_info(dev, "using interrupt line INT2\n"); - irq_open_drain = of_property_read_bool(np, "drive-open-drain"); + irq_open_drain = device_property_read_bool(dev, "drive-open-drain"); data->dready_trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name, diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index ea387efab62d..4f19dc7ffe57 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -26,6 +26,7 @@ #include 
<linux/interrupt.h> #include <linux/module.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/random.h> #include <linux/slab.h> @@ -1050,6 +1051,7 @@ static const struct iio_trigger_ops mpu3050_trigger_ops = { static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq) { struct mpu3050 *mpu3050 = iio_priv(indio_dev); + struct device *dev = mpu3050->dev; unsigned long irq_trig; int ret; @@ -1061,8 +1063,7 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq) return -ENOMEM; /* Check if IRQ is open drain */ - if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain")) - mpu3050->irq_opendrain = true; + mpu3050->irq_opendrain = device_property_read_bool(dev, "drive-open-drain"); irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); /* @@ -1118,13 +1119,12 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq) mpu3050->trig->name, mpu3050->trig); if (ret) { - dev_err(mpu3050->dev, - "can't get IRQ %d, error %d\n", irq, ret); + dev_err(dev, "can't get IRQ %d, error %d\n", irq, ret); return ret; } mpu3050->irq = irq; - mpu3050->trig->dev.parent = mpu3050->dev; + mpu3050->trig->dev.parent = dev; mpu3050->trig->ops = &mpu3050_trigger_ops; iio_trigger_set_drvdata(mpu3050->trig, indio_dev); @@ -1263,7 +1263,7 @@ err_power_down: } EXPORT_SYMBOL(mpu3050_common_probe); -int mpu3050_common_remove(struct device *dev) +void mpu3050_common_remove(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct mpu3050 *mpu3050 = iio_priv(indio_dev); @@ -1276,8 +1276,6 @@ int mpu3050_common_remove(struct device *dev) free_irq(mpu3050->irq, mpu3050); iio_device_unregister(indio_dev); mpu3050_power_down(mpu3050); - - return 0; } EXPORT_SYMBOL(mpu3050_common_remove); diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c index ef5bcbc4b45b..5b5f58baaf7f 100644 --- a/drivers/iio/gyro/mpu3050-i2c.c +++ b/drivers/iio/gyro/mpu3050-i2c.c @@ -86,7 +86,9 @@ static int mpu3050_i2c_remove(struct i2c_client *client) if (mpu3050->i2cmux) i2c_mux_del_adapters(mpu3050->i2cmux); - return mpu3050_common_remove(&client->dev); + mpu3050_common_remove(&client->dev); + + return 0; } /* diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h index 835b0249c376..faf4168a3b07 100644 --- a/drivers/iio/gyro/mpu3050.h +++ b/drivers/iio/gyro/mpu3050.h @@ -91,7 +91,7 @@ int mpu3050_common_probe(struct device *dev, struct regmap *map, int irq, const char *name); -int mpu3050_common_remove(struct device *dev); +void mpu3050_common_remove(struct device *dev); /* PM ops */ extern const struct dev_pm_ops mpu3050_dev_pm_ops; diff --git a/drivers/iio/gyro/ssp_gyro_sensor.c b/drivers/iio/gyro/ssp_gyro_sensor.c index 5fd1bf9902ea..d332474bc484 100644 --- a/drivers/iio/gyro/ssp_gyro_sensor.c +++ b/drivers/iio/gyro/ssp_gyro_sensor.c @@ -113,7 +113,6 @@ static int ssp_gyro_probe(struct platform_device *pdev) indio_dev->available_scan_masks = ssp_gyro_scan_mask; ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &ssp_gyro_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 62172e18d0d8..eaa35da42b33 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -406,24 +406,17 @@ read_error: static int st_gyro_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { - int err; - switch (mask) { case IIO_CHAN_INFO_SCALE: - err = 
st_sensors_set_fullscale_by_gain(indio_dev, val2); - break; + return st_sensors_set_fullscale_by_gain(indio_dev, val2); case IIO_CHAN_INFO_SAMP_FREQ: if (val2) return -EINVAL; - mutex_lock(&indio_dev->mlock); - err = st_sensors_set_odr(indio_dev, val); - mutex_unlock(&indio_dev->mlock); - return err; + + return st_sensors_set_odr(indio_dev, val); default: - err = -EINVAL; + return -EINVAL; } - - return err; } static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL(); diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 36ba7611d9ce..ad5717965223 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c @@ -433,7 +433,6 @@ static int max30100_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &max30100_buffer_setup_ops); if (ret) return ret; diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 2292876c55e2..abbcef563807 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -542,7 +542,6 @@ static int max30102_probe(struct i2c_client *client, } ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &max30102_buffer_setup_ops); if (ret) return ret; diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 44bbe3d19907..fe520194a837 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -7,14 +7,16 @@ #include <linux/clk.h> #include <linux/bitfield.h> -#include <linux/of_irq.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/math.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/lcm.h> +#include <linux/property.h> #include <linux/swab.h> #include <linux/crc32.h> @@ -1119,6 +1121,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct adis16480 *st = iio_priv(indio_dev); struct adis *adis = &st->adis; + struct device *dev = &adis->spi->dev; int ret, bit, offset, i = 0; __be16 *buffer; u32 crc; @@ -1130,7 +1133,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p) adis->tx[1] = 0; ret = spi_write(adis->spi, adis->tx, 2); if (ret) { - dev_err(&adis->spi->dev, "Failed to change device page: %d\n", ret); + dev_err(dev, "Failed to change device page: %d\n", ret); adis_dev_unlock(adis); goto irq_done; } @@ -1140,7 +1143,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p) ret = spi_sync(adis->spi, &adis->msg); if (ret) { - dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret); + dev_err(dev, "Failed to read data: %d\n", ret); adis_dev_unlock(adis); goto irq_done; } @@ -1168,14 +1171,14 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p) } if (offset == 4) { - dev_err(&adis->spi->dev, "Invalid burst data\n"); + dev_err(dev, "Invalid burst data\n"); goto irq_done; } crc = be16_to_cpu(buffer[offset + 16]) << 16 | be16_to_cpu(buffer[offset + 15]); valid = adis16480_validate_crc((u16 *)&buffer[offset], 15, crc); if (!valid) { - dev_err(&adis->spi->dev, "Invalid crc\n"); + dev_err(dev, "Invalid crc\n"); goto irq_done; } @@ -1214,12 +1217,12 @@ static const struct iio_info adis16480_info = { static int adis16480_stop_device(struct iio_dev *indio_dev) { struct adis16480 *st = iio_priv(indio_dev); + struct device *dev = &st->adis.spi->dev; int ret; ret = adis_write_reg_16(&st->adis, ADIS16480_REG_SLP_CNT, 
BIT(9)); if (ret) - dev_err(&indio_dev->dev, - "Could not power down device: %d\n", ret); + dev_err(dev, "Could not power down device: %d\n", ret); return ret; } @@ -1239,9 +1242,10 @@ static int adis16480_enable_irq(struct adis *adis, bool enable) return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val); } -static int adis16480_config_irq_pin(struct device_node *of_node, - struct adis16480 *st) +static int adis16480_config_irq_pin(struct adis16480 *st) { + struct device *dev = &st->adis.spi->dev; + struct fwnode_handle *fwnode = dev_fwnode(dev); struct irq_data *desc; enum adis16480_int_pin pin; unsigned int irq_type; @@ -1250,7 +1254,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node, desc = irq_get_irq_data(st->adis.spi->irq); if (!desc) { - dev_err(&st->adis.spi->dev, "Could not find IRQ %d\n", irq); + dev_err(dev, "Could not find IRQ %d\n", irq); return -EINVAL; } @@ -1267,7 +1271,7 @@ static int adis16480_config_irq_pin(struct device_node *of_node, */ pin = ADIS16480_PIN_DIO1; for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) { - irq = of_irq_get_byname(of_node, adis16480_int_pin_names[i]); + irq = fwnode_irq_get_byname(fwnode, adis16480_int_pin_names[i]); if (irq > 0) { pin = i; break; @@ -1287,23 +1291,22 @@ static int adis16480_config_irq_pin(struct device_node *of_node, } else if (irq_type == IRQ_TYPE_EDGE_FALLING) { val |= ADIS16480_DRDY_POL(0); } else { - dev_err(&st->adis.spi->dev, - "Invalid interrupt type 0x%x specified\n", irq_type); + dev_err(dev, "Invalid interrupt type 0x%x specified\n", irq_type); return -EINVAL; } /* Write the data ready configuration to the FNCTIO_CTRL register */ return adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val); } -static int adis16480_of_get_ext_clk_pin(struct adis16480 *st, - struct device_node *of_node) +static int adis16480_fw_get_ext_clk_pin(struct adis16480 *st) { + struct device *dev = &st->adis.spi->dev; const char *ext_clk_pin; enum adis16480_int_pin pin; int i; pin = ADIS16480_PIN_DIO2; - if (of_property_read_string(of_node, "adi,ext-clk-pin", &ext_clk_pin)) + if (device_property_read_string(dev, "adi,ext-clk-pin", &ext_clk_pin)) goto clk_input_not_found; for (i = 0; i < ARRAY_SIZE(adis16480_int_pin_names); i++) { @@ -1312,15 +1315,13 @@ static int adis16480_of_get_ext_clk_pin(struct adis16480 *st, } clk_input_not_found: - dev_info(&st->adis.spi->dev, - "clk input line not specified, using DIO2\n"); + dev_info(dev, "clk input line not specified, using DIO2\n"); return pin; } -static int adis16480_ext_clk_config(struct adis16480 *st, - struct device_node *of_node, - bool enable) +static int adis16480_ext_clk_config(struct adis16480 *st, bool enable) { + struct device *dev = &st->adis.spi->dev; unsigned int mode, mask; enum adis16480_int_pin pin; uint16_t val; @@ -1330,16 +1331,14 @@ static int adis16480_ext_clk_config(struct adis16480 *st, if (ret) return ret; - pin = adis16480_of_get_ext_clk_pin(st, of_node); + pin = adis16480_fw_get_ext_clk_pin(st); /* * Each DIOx pin supports only one function at a time. When a single pin * has two assignments, the enable bit for a lower priority function * automatically resets to zero (disabling the lower priority function). 
*/ if (pin == ADIS16480_DRDY_SEL(val)) - dev_warn(&st->adis.spi->dev, - "DIO%x pin supports only one function at a time\n", - pin + 1); + dev_warn(dev, "DIO%x pin supports only one function at a time\n", pin + 1); mode = ADIS16480_SYNC_EN(enable) | ADIS16480_SYNC_SEL(pin); mask = ADIS16480_SYNC_EN_MSK | ADIS16480_SYNC_SEL_MSK; @@ -1361,31 +1360,27 @@ static int adis16480_ext_clk_config(struct adis16480 *st, static int adis16480_get_ext_clocks(struct adis16480 *st) { - st->clk_mode = ADIS16480_CLK_INT; - st->ext_clk = devm_clk_get(&st->adis.spi->dev, "sync"); - if (!IS_ERR_OR_NULL(st->ext_clk)) { + struct device *dev = &st->adis.spi->dev; + + st->ext_clk = devm_clk_get_optional(dev, "sync"); + if (IS_ERR(st->ext_clk)) + return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n"); + if (st->ext_clk) { st->clk_mode = ADIS16480_CLK_SYNC; return 0; } - if (PTR_ERR(st->ext_clk) != -ENOENT) { - dev_err(&st->adis.spi->dev, "failed to get ext clk\n"); - return PTR_ERR(st->ext_clk); - } - if (st->chip_info->has_pps_clk_mode) { - st->ext_clk = devm_clk_get(&st->adis.spi->dev, "pps"); - if (!IS_ERR_OR_NULL(st->ext_clk)) { + st->ext_clk = devm_clk_get_optional(dev, "pps"); + if (IS_ERR(st->ext_clk)) + return dev_err_probe(dev, PTR_ERR(st->ext_clk), "failed to get ext clk\n"); + if (st->ext_clk) { st->clk_mode = ADIS16480_CLK_PPS; return 0; } - - if (PTR_ERR(st->ext_clk) != -ENOENT) { - dev_err(&st->adis.spi->dev, "failed to get ext clk\n"); - return PTR_ERR(st->ext_clk); - } } + st->clk_mode = ADIS16480_CLK_INT; return 0; } @@ -1404,11 +1399,12 @@ static int adis16480_probe(struct spi_device *spi) const struct spi_device_id *id = spi_get_device_id(spi); const struct adis_data *adis16480_data; irq_handler_t trigger_handler = NULL; + struct device *dev = &spi->dev; struct iio_dev *indio_dev; struct adis16480 *st; int ret; - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); if (indio_dev == NULL) return -ENOMEM; @@ -1432,13 +1428,12 @@ static int adis16480_probe(struct spi_device *spi) return ret; if (st->chip_info->has_sleep_cnt) { - ret = devm_add_action_or_reset(&spi->dev, adis16480_stop, - indio_dev); + ret = devm_add_action_or_reset(dev, adis16480_stop, indio_dev); if (ret) return ret; } - ret = adis16480_config_irq_pin(spi->dev.of_node, st); + ret = adis16480_config_irq_pin(st); if (ret) return ret; @@ -1446,12 +1441,12 @@ static int adis16480_probe(struct spi_device *spi) if (ret) return ret; - if (!IS_ERR_OR_NULL(st->ext_clk)) { - ret = adis16480_ext_clk_config(st, spi->dev.of_node, true); + if (st->ext_clk) { + ret = adis16480_ext_clk_config(st, true); if (ret) return ret; - ret = devm_add_action_or_reset(&spi->dev, adis16480_clk_disable, st->ext_clk); + ret = devm_add_action_or_reset(dev, adis16480_clk_disable, st->ext_clk); if (ret) return ret; @@ -1484,7 +1479,7 @@ static int adis16480_probe(struct spi_device *spi) if (ret) return ret; - ret = devm_iio_device_register(&spi->dev, indio_dev); + ret = devm_iio_device_register(dev, indio_dev); if (ret) return ret; diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 01336105792e..e7aec56ea136 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c @@ -11,10 +11,9 @@ */ #include <linux/module.h> #include <linux/regmap.h> -#include <linux/acpi.h> #include <linux/delay.h> #include <linux/irq.h> -#include <linux/of_irq.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include 
<linux/iio/iio.h> @@ -525,17 +524,6 @@ static const struct iio_info bmi160_info = { .attrs = &bmi160_attrs_group, }; -static const char *bmi160_match_acpi_device(struct device *dev) -{ - const struct acpi_device_id *id; - - id = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!id) - return NULL; - - return dev_name(dev); -} - static int bmi160_write_conf_reg(struct regmap *regmap, unsigned int reg, unsigned int mask, unsigned int bits, unsigned int write_usleep) @@ -647,18 +635,18 @@ int bmi160_enable_irq(struct regmap *regmap, bool enable) } EXPORT_SYMBOL(bmi160_enable_irq); -static int bmi160_get_irq(struct device_node *of_node, enum bmi160_int_pin *pin) +static int bmi160_get_irq(struct fwnode_handle *fwnode, enum bmi160_int_pin *pin) { int irq; /* Use INT1 if possible, otherwise fall back to INT2. */ - irq = of_irq_get_byname(of_node, "INT1"); + irq = fwnode_irq_get_byname(fwnode, "INT1"); if (irq > 0) { *pin = BMI160_PIN_INT1; return irq; } - irq = of_irq_get_byname(of_node, "INT2"); + irq = fwnode_irq_get_byname(fwnode, "INT2"); if (irq > 0) *pin = BMI160_PIN_INT2; @@ -688,7 +676,7 @@ static int bmi160_config_device_irq(struct iio_dev *indio_dev, int irq_type, return -EINVAL; } - open_drain = of_property_read_bool(dev->of_node, "drive-open-drain"); + open_drain = device_property_read_bool(dev, "drive-open-drain"); return bmi160_config_pin(data->regmap, pin, open_drain, irq_mask, BMI160_NORMAL_WRITE_USLEEP); @@ -872,9 +860,6 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap, if (ret) return ret; - if (!name && ACPI_HANDLE(dev)) - name = bmi160_match_acpi_device(dev); - indio_dev->channels = bmi160_channels; indio_dev->num_channels = ARRAY_SIZE(bmi160_channels); indio_dev->name = name; @@ -887,7 +872,7 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap, if (ret) return ret; - irq = bmi160_get_irq(dev->of_node, &int_pin); + irq = bmi160_get_irq(dev_fwnode(dev), &int_pin); if (irq > 0) { ret = bmi160_setup_irq(indio_dev, irq, int_pin); if (ret) diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c index 26398614eddf..02f149d37b17 100644 --- a/drivers/iio/imu/bmi160/bmi160_i2c.c +++ b/drivers/iio/imu/bmi160/bmi160_i2c.c @@ -8,10 +8,9 @@ * - 0x68 if SDO is pulled to GND * - 0x69 if SDO is pulled to VDDIO */ -#include <linux/acpi.h> #include <linux/i2c.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/regmap.h> #include "bmi160.h" @@ -20,7 +19,7 @@ static int bmi160_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct regmap *regmap; - const char *name = NULL; + const char *name; regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config); if (IS_ERR(regmap)) { @@ -31,6 +30,8 @@ static int bmi160_i2c_probe(struct i2c_client *client, if (id) name = id->name; + else + name = dev_name(&client->dev); return bmi160_core_probe(&client->dev, regmap, name, false); } @@ -47,19 +48,17 @@ static const struct acpi_device_id bmi160_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match); -#ifdef CONFIG_OF static const struct of_device_id bmi160_of_match[] = { { .compatible = "bosch,bmi160" }, { }, }; MODULE_DEVICE_TABLE(of, bmi160_of_match); -#endif static struct i2c_driver bmi160_i2c_driver = { .driver = { .name = "bmi160_i2c", - .acpi_match_table = ACPI_PTR(bmi160_acpi_match), - .of_match_table = of_match_ptr(bmi160_of_match), + .acpi_match_table = bmi160_acpi_match, + .of_match_table = bmi160_of_match, }, .probe = bmi160_i2c_probe, 
.id_table = bmi160_i2c_id, diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c index 61389b41c6d9..24f7d75c7903 100644 --- a/drivers/iio/imu/bmi160/bmi160_spi.c +++ b/drivers/iio/imu/bmi160/bmi160_spi.c @@ -5,9 +5,8 @@ * Copyright (c) 2016, Intel Corporation. * */ -#include <linux/acpi.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/regmap.h> #include <linux/spi/spi.h> @@ -17,6 +16,7 @@ static int bmi160_spi_probe(struct spi_device *spi) { struct regmap *regmap; const struct spi_device_id *id = spi_get_device_id(spi); + const char *name; regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config); if (IS_ERR(regmap)) { @@ -24,7 +24,13 @@ static int bmi160_spi_probe(struct spi_device *spi) regmap); return PTR_ERR(regmap); } - return bmi160_core_probe(&spi->dev, regmap, id->name, true); + + if (id) + name = id->name; + else + name = dev_name(&spi->dev); + + return bmi160_core_probe(&spi->dev, regmap, name, true); } static const struct spi_device_id bmi160_spi_id[] = { @@ -39,20 +45,18 @@ static const struct acpi_device_id bmi160_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match); -#ifdef CONFIG_OF static const struct of_device_id bmi160_of_match[] = { { .compatible = "bosch,bmi160" }, { }, }; MODULE_DEVICE_TABLE(of, bmi160_of_match); -#endif static struct spi_driver bmi160_spi_driver = { .probe = bmi160_spi_probe, .id_table = bmi160_spi_id, .driver = { - .acpi_match_table = ACPI_PTR(bmi160_acpi_match), - .of_match_table = of_match_ptr(bmi160_of_match), + .acpi_match_table = bmi160_acpi_match, + .of_match_table = bmi160_of_match, .name = "bmi160_spi", }, }; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index 383cc3250342..c3f433ad3af6 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -731,7 +731,6 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st) indio_dev->available_scan_masks = inv_icm42600_accel_scan_masks; ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &inv_icm42600_buffer_ops); if (ret) return ERR_PTR(ret); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index cec1dd0e0464..9d94a8518e3c 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -743,7 +743,6 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st) indio_dev->setup_ops = &inv_icm42600_buffer_ops; ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &inv_icm42600_buffer_ops); if (ret) return ERR_PTR(ret); diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index 9c625517173a..3636b1bc90f1 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -16,7 +16,7 @@ config INV_MPU6050_I2C select REGMAP_I2C help This driver supports the Invensense MPU6050/9150, - MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 + MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690 and IAM20680 motion tracking devices over I2C. This driver can be built as a module. The module will be called inv-mpu6050-i2c. 
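The bmi160 I2C and SPI probe changes above share one pattern: when the device is enumerated through ACPI or the device tree, the id-table lookup can return NULL, so the driver now falls back to dev_name() instead of dereferencing a NULL id. A minimal sketch of that pattern follows; spi_get_device_id() and dev_name() are real kernel APIs, while example_core_probe() is a hypothetical stand-in for the driver's core probe helper.

static int example_spi_probe(struct spi_device *spi)
{
        const struct spi_device_id *id = spi_get_device_id(spi);
        const char *name;

        /* id is NULL when the device was enumerated via ACPI/DT firmware. */
        if (id)
                name = id->name;
        else
                name = dev_name(&spi->dev);

        return example_core_probe(&spi->dev, name);
}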
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI select REGMAP_SPI help This driver supports the Invensense MPU6000, - MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 + MPU6500/6515/6880/9250/9255, ICM20608(D)/20609/20689, ICM20602/ICM20690 and IAM20680 motion tracking devices over SPI. This driver can be built as a module. The module will be called inv-mpu6050-spi. diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 597768c29a72..86fbbe904050 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -218,6 +218,15 @@ static const struct inv_mpu6050_hw hw_info[] = { .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME}, }, { + .whoami = INV_ICM20608D_WHOAMI_VALUE, + .name = "ICM20608D", + .reg = ®_set_6500, + .config = &chip_config_6500, + .fifo_size = 512, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, + .startup_time = {INV_MPU6500_GYRO_STARTUP_TIME, INV_MPU6500_ACCEL_STARTUP_TIME}, + }, + { .whoami = INV_ICM20609_WHOAMI_VALUE, .name = "ICM20609", .reg = ®_set_6500, diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index 55cffb5fa115..2aa647704a79 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -29,6 +29,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev) switch (st->chip_type) { case INV_ICM20608: + case INV_ICM20608D: case INV_ICM20609: case INV_ICM20689: case INV_ICM20602: @@ -182,6 +183,7 @@ static const struct i2c_device_id inv_mpu_id[] = { {"mpu9250", INV_MPU9250}, {"mpu9255", INV_MPU9255}, {"icm20608", INV_ICM20608}, + {"icm20608d", INV_ICM20608D}, {"icm20609", INV_ICM20609}, {"icm20689", INV_ICM20689}, {"icm20602", INV_ICM20602}, @@ -226,6 +228,10 @@ static const struct of_device_id inv_of_match[] = { .data = (void *)INV_ICM20608 }, { + .compatible = "invensense,icm20608d", + .data = (void *)INV_ICM20608D + }, + { .compatible = "invensense,icm20609", .data = (void *)INV_ICM20609 }, diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index c6aa36ee966a..8e14f20b1314 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -76,6 +76,7 @@ enum inv_devices { INV_MPU9250, INV_MPU9255, INV_ICM20608, + INV_ICM20608D, INV_ICM20609, INV_ICM20689, INV_ICM20602, @@ -394,6 +395,7 @@ struct inv_mpu6050_state { #define INV_MPU9255_WHOAMI_VALUE 0x73 #define INV_MPU6515_WHOAMI_VALUE 0x74 #define INV_ICM20608_WHOAMI_VALUE 0xAF +#define INV_ICM20608D_WHOAMI_VALUE 0xAE #define INV_ICM20609_WHOAMI_VALUE 0xA6 #define INV_ICM20689_WHOAMI_VALUE 0x98 #define INV_ICM20602_WHOAMI_VALUE 0x12 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c index 26a7c2521dc4..e6107b0cc38f 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -73,6 +73,7 @@ static const struct spi_device_id inv_mpu_id[] = { {"mpu9250", INV_MPU9250}, {"mpu9255", INV_MPU9255}, {"icm20608", INV_ICM20608}, + {"icm20608d", INV_ICM20608D}, {"icm20609", INV_ICM20609}, {"icm20689", INV_ICM20689}, {"icm20602", INV_ICM20602}, @@ -113,6 +114,10 @@ static const struct of_device_id inv_of_match[] = { .data = (void *)INV_ICM20608 }, { + .compatible = "invensense,icm20608d", + .data = (void *)INV_ICM20608D + }, + { .compatible = "invensense,icm20609", .data = (void *)INV_ICM20609 }, diff --git 
a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig index 85860217aaf3..fefd0b939100 100644 --- a/drivers/iio/imu/st_lsm6dsx/Kconfig +++ b/drivers/iio/imu/st_lsm6dsx/Kconfig @@ -11,9 +11,9 @@ config IIO_ST_LSM6DSX help Say yes here to build support for STMicroelectronics LSM6DSx imu sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm, - ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c, - ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, the accelerometer/gyroscope - of lsm9ds1 and lsm6dst. + ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr, + lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, + the accelerometer/gyroscope of lsm9ds1 and lsm6dst. To compile this driver as a module, choose M here: the module will be called st_lsm6dsx. diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index 6ac4eac36458..a86dd29a4738 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -31,6 +31,7 @@ #define ST_LSM6DSRX_DEV_NAME "lsm6dsrx" #define ST_LSM6DST_DEV_NAME "lsm6dst" #define ST_LSM6DSOP_DEV_NAME "lsm6dsop" +#define ST_ASM330LHHX_DEV_NAME "asm330lhhx" enum st_lsm6dsx_hw_id { ST_LSM6DS3_ID, @@ -49,6 +50,7 @@ enum st_lsm6dsx_hw_id { ST_LSM6DSRX_ID, ST_LSM6DST_ID, ST_LSM6DSOP_ID, + ST_ASM330LHHX_ID, ST_LSM6DSX_MAX_ID, }; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 16730a780964..c7d3730ab1c5 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -14,7 +14,8 @@ * (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the * value of the decimation factor and ODR set for each FIFO data set. * - * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX/LSM6DST/LSM6DSOP: + * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/ + * LSM6DST/LSM6DSOP: * The FIFO buffer can be configured to store data from gyroscope and * accelerometer. Each sample is queued with a tag (1B) indicating data * source (gyroscope, accelerometer, hw timer). 
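The buffer.c comment above describes the tagged-FIFO layout these newer ST parts share: each FIFO pattern is a one-byte tag identifying the data source followed by one 3-axis sample. A hedged sketch of decoding such a word is shown below; the tag values and struct layout are assumptions for illustration, not the st_lsm6dsx driver's actual definitions.

#define EXAMPLE_TAG_GYRO        0x01    /* illustrative tag values */
#define EXAMPLE_TAG_ACC         0x02

struct example_fifo_word {
        u8 tag;                 /* data source: gyro, accel, hw timer */
        __le16 sample[3];       /* x, y, z */
} __packed;

static void example_route_word(const struct example_fifo_word *w)
{
        switch (w->tag) {
        case EXAMPLE_TAG_GYRO:
                /* push le16_to_cpu(w->sample[i]) to the gyro IIO buffer */
                break;
        case EXAMPLE_TAG_ACC:
                /* push the sample to the accelerometer IIO buffer */
                break;
        default:
                /* timer/unknown tags are skipped in this sketch */
                break;
        }
}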
@@ -746,7 +747,6 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw) continue; ret = devm_iio_kfifo_buffer_setup(hw->dev, hw->iio_devs[i], - INDIO_BUFFER_SOFTWARE, &st_lsm6dsx_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index b1d8d5a66f01..910397716833 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -26,7 +26,7 @@ * - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000 * - FIFO size: 4KB * - * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP: + * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP: * - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416, * 833 * - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16 @@ -786,6 +786,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .hw_id = ST_LSM6DST_ID, .name = ST_LSM6DST_DEV_NAME, .wai = 0x6d, + }, { + .hw_id = ST_ASM330LHHX_ID, + .name = ST_ASM330LHHX_DEV_NAME, + .wai = 0x6b, }, }, .channels = { diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c index 8b4fc2c15622..715fbdc8190e 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c @@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = { .compatible = "st,lsm6dsop", .data = (void *)ST_LSM6DSOP_ID, }, + { + .compatible = "st,asm330lhhx", + .data = (void *)ST_ASM330LHHX_ID, + }, {}, }; MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match); @@ -122,6 +126,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = { { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID }, { ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID }, { ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID }, + { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID }, {}, }; MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c index e80110b6b280..f5767cf76c1d 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c @@ -101,6 +101,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = { .compatible = "st,lsm6dsop", .data = (void *)ST_LSM6DSOP_ID, }, + { + .compatible = "st,asm330lhhx", + .data = (void *)ST_ASM330LHHX_ID, + }, {}, }; MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match); @@ -122,6 +126,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = { { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID }, { ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID }, { ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID }, + { ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID }, {}, }; MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index b078eb2f3c9d..06141ca27e1f 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -510,7 +510,7 @@ static ssize_t iio_scan_el_store(struct device *dev, struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); struct iio_buffer *buffer = this_attr->buffer; - ret = strtobool(buf, &state); + ret = kstrtobool(buf, &state); if (ret < 0) return ret; mutex_lock(&indio_dev->mlock); @@ -557,7 +557,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev, struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; bool state; - ret = strtobool(buf, &state); + ret = kstrtobool(buf, &state); if (ret < 0) return ret; @@ 
-915,7 +915,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, if (scan_mask == NULL) return -EINVAL; } else { - scan_mask = compound_mask; + scan_mask = compound_mask; } config->scan_bytes = iio_compute_scan_bytes(indio_dev, @@ -1059,13 +1059,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, struct iio_device_config *config) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); - struct iio_buffer *buffer; + struct iio_buffer *buffer, *tmp = NULL; int ret; indio_dev->active_scan_mask = config->scan_mask; indio_dev->scan_timestamp = config->scan_timestamp; indio_dev->scan_bytes = config->scan_bytes; - indio_dev->currentmode = config->mode; + iio_dev_opaque->currentmode = config->mode; iio_update_demux(indio_dev); @@ -1097,11 +1097,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { ret = iio_buffer_enable(buffer, indio_dev); - if (ret) + if (ret) { + tmp = buffer; goto err_disable_buffers; + } } - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { ret = iio_trigger_attach_poll_func(indio_dev->trig, indio_dev->pollfunc); if (ret) @@ -1120,11 +1122,12 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, return 0; err_detach_pollfunc: - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { iio_trigger_detach_poll_func(indio_dev->trig, indio_dev->pollfunc); } err_disable_buffers: + buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list); list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, buffer_list) iio_buffer_disable(buffer, indio_dev); @@ -1132,7 +1135,7 @@ err_run_postdisable: if (indio_dev->setup_ops->postdisable) indio_dev->setup_ops->postdisable(indio_dev); err_undo_config: - indio_dev->currentmode = INDIO_DIRECT_MODE; + iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; indio_dev->active_scan_mask = NULL; return ret; @@ -1162,7 +1165,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) ret = ret2; } - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { iio_trigger_detach_poll_func(indio_dev->trig, indio_dev->pollfunc); } @@ -1181,7 +1184,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask); indio_dev->active_scan_mask = NULL; - indio_dev->currentmode = INDIO_DIRECT_MODE; + iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; return ret; } @@ -1300,7 +1303,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev, struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; bool inlist; - ret = strtobool(buf, &requested_state); + ret = kstrtobool(buf, &requested_state); if (ret < 0) return ret; @@ -1629,6 +1632,19 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, if (channels[i].scan_index < 0) continue; + /* Verify that sample bits fit into storage */ + if (channels[i].scan_type.storagebits < + channels[i].scan_type.realbits + + channels[i].scan_type.shift) { + dev_err(&indio_dev->dev, + "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n", + i, channels[i].scan_type.storagebits, + channels[i].scan_type.realbits, + channels[i].scan_type.shift); + ret = -EINVAL; + goto error_cleanup_dynamic; + } + ret = iio_buffer_add_channel_sysfs(indio_dev, buffer, &channels[i]); if (ret < 0) @@ -1649,7 +1665,7 @@ static int 
__iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, } attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs); - attr = kcalloc(attrn + 1, sizeof(* attr), GFP_KERNEL); + attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL); if (!attr) { ret = -ENOMEM; goto error_free_scan_mask; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index e1ed44dec2ab..adf054c7a75e 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -185,6 +185,20 @@ int iio_device_id(struct iio_dev *indio_dev) EXPORT_SYMBOL_GPL(iio_device_id); /** + * iio_buffer_enabled() - helper function to test if the buffer is enabled + * @indio_dev: IIO device structure for device + */ +bool iio_buffer_enabled(struct iio_dev *indio_dev) +{ + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + return iio_dev_opaque->currentmode + & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | + INDIO_BUFFER_SOFTWARE); +} +EXPORT_SYMBOL_GPL(iio_buffer_enabled); + +/** * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps * @array: array of strings * @n: number of strings in the array @@ -892,8 +906,7 @@ static int __iio_str_to_fixpoint(const char *str, int fract_mult, } else if (*str == '\n') { if (*(str + 1) == '\0') break; - else - return -EINVAL; + return -EINVAL; } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) { /* Ignore the dB suffix */ str += sizeof(" dB") - 1; @@ -1894,20 +1907,22 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops; int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); - const char *label; + struct fwnode_handle *fwnode; int ret; if (!indio_dev->info) return -EINVAL; iio_dev_opaque->driver_module = this_mod; - /* If the calling driver did not initialize of_node, do it here */ - if (!indio_dev->dev.of_node && indio_dev->dev.parent) - indio_dev->dev.of_node = indio_dev->dev.parent->of_node; - label = of_get_property(indio_dev->dev.of_node, "label", NULL); - if (label) - indio_dev->label = label; + /* If the calling driver did not initialize firmware node, do it here */ + if (dev_fwnode(&indio_dev->dev)) + fwnode = dev_fwnode(&indio_dev->dev); + else + fwnode = dev_fwnode(indio_dev->dev.parent); + device_set_node(&indio_dev->dev, fwnode); + + fwnode_property_read_string(fwnode, "label", &indio_dev->label); ret = iio_check_unique_scan_index(indio_dev); if (ret < 0) @@ -2059,6 +2074,19 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev) } EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); +/** + * iio_device_get_current_mode() - helper function providing read-only access to + * the opaque @currentmode variable + * @indio_dev: IIO device structure for device + */ +int iio_device_get_current_mode(struct iio_dev *indio_dev) +{ + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + return iio_dev_opaque->currentmode; +} +EXPORT_SYMBOL_GPL(iio_device_get_current_mode); + subsys_initcall(iio_init); module_exit(iio_exit); diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index ce8b102ce52f..b5e059e15b0a 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -274,7 +274,7 @@ static ssize_t iio_ev_state_store(struct device *dev, int ret; bool val; - ret = strtobool(buf, &val); + ret = kstrtobool(buf, &val); if (ret < 0) return ret; diff --git a/drivers/iio/industrialio-trigger.c 
b/drivers/iio/industrialio-trigger.c index f504ed351b3e..585b6cef8fcc 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -444,7 +444,7 @@ static ssize_t iio_trigger_write_current(struct device *dev, int ret; mutex_lock(&indio_dev->mlock); - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { mutex_unlock(&indio_dev->mlock); return -EBUSY; } diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index a62c7b4b8678..8537e88f02e3 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -155,7 +155,6 @@ config CM3323 config CM3605 tristate "Capella CM3605 ambient light and proximity sensor" - depends on OF help Say Y here if you want to build a driver for Capella CM3605 ambient light and short range proximity sensor. diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index 4141c0fa7bc4..09b831f9f40b 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -1003,7 +1003,6 @@ static int apds9960_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &apds9960_buffer_setup_ops); if (ret) return ret; diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index 1d02dfbc29d1..b578b46276cc 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -106,6 +106,7 @@ struct stk3310_data { struct mutex lock; bool als_enabled; bool ps_enabled; + uint32_t ps_near_level; u64 timestamp; struct regmap *regmap; struct regmap_field *reg_state; @@ -135,6 +136,25 @@ static const struct iio_event_spec stk3310_events[] = { }, }; +static ssize_t stk3310_read_near_level(struct iio_dev *indio_dev, + uintptr_t priv, + const struct iio_chan_spec *chan, + char *buf) +{ + struct stk3310_data *data = iio_priv(indio_dev); + + return sprintf(buf, "%u\n", data->ps_near_level); +} + +static const struct iio_chan_spec_ext_info stk3310_ext_info[] = { + { + .name = "nearlevel", + .shared = IIO_SEPARATE, + .read = stk3310_read_near_level, + }, + { /* sentinel */ } +}; + static const struct iio_chan_spec stk3310_channels[] = { { .type = IIO_LIGHT, @@ -151,6 +171,7 @@ static const struct iio_chan_spec stk3310_channels[] = { BIT(IIO_CHAN_INFO_INT_TIME), .event_spec = stk3310_events, .num_event_specs = ARRAY_SIZE(stk3310_events), + .ext_info = stk3310_ext_info, } }; @@ -581,6 +602,10 @@ static int stk3310_probe(struct i2c_client *client, data = iio_priv(indio_dev); data->client = client; i2c_set_clientdata(client, indio_dev); + + device_property_read_u32(&client->dev, "proximity-near-level", + &data->ps_near_level); + mutex_init(&data->lock); ret = stk3310_regmap_init(data); diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c index 729f14d9f2a4..dd9051f1cc1a 100644 --- a/drivers/iio/light/tsl2772.c +++ b/drivers/iio/light/tsl2772.c @@ -15,7 +15,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/property.h> #include <linux/slab.h> + #include <linux/iio/events.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -549,10 +551,10 @@ prox_poll_err: static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip) { - struct device_node *of_node = chip->client->dev.of_node; + struct device *dev = &chip->client->dev; int ret, tmp, i; - ret = of_property_read_u32(of_node, "led-max-microamp", &tmp); + ret = device_property_read_u32(dev, "led-max-microamp", 
&tmp); if (ret < 0) return ret; @@ -563,20 +565,18 @@ static int tsl2772_read_prox_led_current(struct tsl2772_chip *chip) } } - dev_err(&chip->client->dev, "Invalid value %d for led-max-microamp\n", - tmp); + dev_err(dev, "Invalid value %d for led-max-microamp\n", tmp); return -EINVAL; - } static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip) { - struct device_node *of_node = chip->client->dev.of_node; + struct device *dev = &chip->client->dev; int i, ret, num_leds, prox_diode_mask; u32 leds[TSL2772_MAX_PROX_LEDS]; - ret = of_property_count_u32_elems(of_node, "amstaos,proximity-diodes"); + ret = device_property_count_u32(dev, "amstaos,proximity-diodes"); if (ret < 0) return ret; @@ -584,12 +584,9 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip) if (num_leds > TSL2772_MAX_PROX_LEDS) num_leds = TSL2772_MAX_PROX_LEDS; - ret = of_property_read_u32_array(of_node, "amstaos,proximity-diodes", - leds, num_leds); + ret = device_property_read_u32_array(dev, "amstaos,proximity-diodes", leds, num_leds); if (ret < 0) { - dev_err(&chip->client->dev, - "Invalid value for amstaos,proximity-diodes: %d.\n", - ret); + dev_err(dev, "Invalid value for amstaos,proximity-diodes: %d.\n", ret); return ret; } @@ -600,9 +597,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip) else if (leds[i] == 1) prox_diode_mask |= TSL2772_DIODE1; else { - dev_err(&chip->client->dev, - "Invalid value %d in amstaos,proximity-diodes.\n", - leds[i]); + dev_err(dev, "Invalid value %d in amstaos,proximity-diodes.\n", leds[i]); return -EINVAL; } } diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index 54445365c4bc..07eb619bcfe8 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -9,7 +9,6 @@ menu "Magnetometer sensors" config AK8974 tristate "Asahi Kasei AK8974 3-Axis Magnetometer" depends on I2C - depends on OF select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c index 26195733ea3e..707ba25360b8 100644 --- a/drivers/iio/magnetometer/rm3100-core.c +++ b/drivers/iio/magnetometer/rm3100-core.c @@ -141,18 +141,10 @@ static irqreturn_t rm3100_irq_handler(int irq, void *d) struct iio_dev *indio_dev = d; struct rm3100_data *data = iio_priv(indio_dev); - switch (indio_dev->currentmode) { - case INDIO_DIRECT_MODE: + if (!iio_buffer_enabled(indio_dev)) complete(&data->measuring_done); - break; - case INDIO_BUFFER_TRIGGERED: + else iio_trigger_poll(data->drdy_trig); - break; - default: - dev_err(indio_dev->dev.parent, - "device mode out of control, current mode: %d", - indio_dev->currentmode); - } return IRQ_WAKE_THREAD; } @@ -377,7 +369,7 @@ static int rm3100_set_samp_freq(struct iio_dev *indio_dev, int val, int val2) goto unlock_return; } - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_buffer_enabled(indio_dev)) { /* Writing TMRC registers requires CMM reset. 
*/ ret = regmap_write(regmap, RM3100_REG_CMM, 0); if (ret < 0) @@ -553,7 +545,6 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) indio_dev->channels = rm3100_channels; indio_dev->num_channels = ARRAY_SIZE(rm3100_channels); indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED; - indio_dev->currentmode = INDIO_DIRECT_MODE; if (!irq) data->use_interrupt = false; diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 74435f4a427d..e2fd233b3626 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -540,24 +540,17 @@ read_error: static int st_magn_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { - int err; - switch (mask) { case IIO_CHAN_INFO_SCALE: - err = st_sensors_set_fullscale_by_gain(indio_dev, val2); - break; + return st_sensors_set_fullscale_by_gain(indio_dev, val2); case IIO_CHAN_INFO_SAMP_FREQ: if (val2) return -EINVAL; - mutex_lock(&indio_dev->mlock); - err = st_sensors_set_odr(indio_dev, val); - mutex_unlock(&indio_dev->mlock); - return err; + + return st_sensors_set_odr(indio_dev, val); default: - err = -EINVAL; + return -EINVAL; } - - return err; } static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL(); diff --git a/drivers/iio/multiplexer/Kconfig b/drivers/iio/multiplexer/Kconfig index a1e1332d1206..928f424a1ed3 100644 --- a/drivers/iio/multiplexer/Kconfig +++ b/drivers/iio/multiplexer/Kconfig @@ -9,7 +9,6 @@ menu "Multiplexers" config IIO_MUX tristate "IIO multiplexer driver" select MULTIPLEXER - depends on OF || COMPILE_TEST help Say yes here to build support for the IIO multiplexer. diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c index f422d44377df..93558fddfa9b 100644 --- a/drivers/iio/multiplexer/iio-mux.c +++ b/drivers/iio/multiplexer/iio-mux.c @@ -10,11 +10,12 @@ #include <linux/err.h> #include <linux/iio/consumer.h> #include <linux/iio/iio.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mux/consumer.h> -#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> struct mux_ext_info_cache { char *data; @@ -324,37 +325,21 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, return 0; } -/* - * Same as of_property_for_each_string(), but also keeps track of the - * index of each string. 
- */ -#define of_property_for_each_string_index(np, propname, prop, s, i) \ - for (prop = of_find_property(np, propname, NULL), \ - s = of_prop_next_string(prop, NULL), \ - i = 0; \ - s; \ - s = of_prop_next_string(prop, s), \ - i++) - static int mux_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = pdev->dev.of_node; struct iio_dev *indio_dev; struct iio_channel *parent; struct mux *mux; - struct property *prop; - const char *label; + const char **labels; + int all_children; + int children; u32 state; int sizeof_ext_info; - int children; int sizeof_priv; int i; int ret; - if (!np) - return -ENODEV; - parent = devm_iio_channel_get(dev, "parent"); if (IS_ERR(parent)) return dev_err_probe(dev, PTR_ERR(parent), @@ -366,9 +351,21 @@ static int mux_probe(struct platform_device *pdev) sizeof_ext_info *= sizeof(*mux->ext_info); } + all_children = device_property_string_array_count(dev, "channels"); + if (all_children < 0) + return all_children; + + labels = devm_kmalloc_array(dev, all_children, sizeof(*labels), GFP_KERNEL); + if (!labels) + return -ENOMEM; + + ret = device_property_read_string_array(dev, "channels", labels, all_children); + if (ret < 0) + return ret; + children = 0; - of_property_for_each_string(np, "channels", prop, label) { - if (*label) + for (state = 0; state < all_children; state++) { + if (*labels[state]) children++; } if (children <= 0) { @@ -395,7 +392,7 @@ static int mux_probe(struct platform_device *pdev) mux->cached_state = -1; mux->delay_us = 0; - of_property_read_u32(np, "settle-time-us", &mux->delay_us); + device_property_read_u32(dev, "settle-time-us", &mux->delay_us); indio_dev->name = dev_name(dev); indio_dev->info = &mux_info; @@ -426,11 +423,11 @@ static int mux_probe(struct platform_device *pdev) } i = 0; - of_property_for_each_string_index(np, "channels", prop, label, state) { - if (!*label) + for (state = 0; state < all_children; state++) { + if (!*labels[state]) continue; - ret = mux_configure_channel(dev, mux, state, label, i++); + ret = mux_configure_channel(dev, mux, state, labels[state], i++); if (ret < 0) return ret; } diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 5b93933a2e27..76913a2028d2 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -560,16 +560,12 @@ static int st_press_write_raw(struct iio_dev *indio_dev, int val2, long mask) { - int err; - switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: if (val2) return -EINVAL; - mutex_lock(&indio_dev->mlock); - err = st_sensors_set_odr(indio_dev, val); - mutex_unlock(&indio_dev->mlock); - return err; + + return st_sensors_set_odr(indio_dev, val); default: return -EINVAL; } diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c index ad4b1fb2607a..0bca5f74de68 100644 --- a/drivers/iio/proximity/mb1232.c +++ b/drivers/iio/proximity/mb1232.c @@ -10,12 +10,14 @@ * https://www.maxbotix.com/documents/I2CXL-MaxSonar-EZ_Datasheet.pdf */ +#include <linux/bitops.h> #include <linux/err.h> #include <linux/i2c.h> -#include <linux/of_irq.h> #include <linux/delay.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/bitops.h> +#include <linux/property.h> + #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> @@ -209,7 +211,7 @@ static int mb1232_probe(struct i2c_client *client, init_completion(&data->ranging); - data->irqnr = irq_of_parse_and_map(dev->of_node, 0); + data->irqnr = 
fwnode_irq_get(dev_fwnode(&client->dev), 0); if (data->irqnr <= 0) { /* usage of interrupt is optional */ data->irqnr = -1; diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c index 24a97d41e115..d56e037378de 100644 --- a/drivers/iio/proximity/ping.c +++ b/drivers/iio/proximity/ping.c @@ -29,9 +29,8 @@ #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/sched.h> @@ -288,7 +287,7 @@ static int ping_probe(struct platform_device *pdev) data = iio_priv(indio_dev); data->dev = dev; - data->cfg = of_device_get_match_data(dev); + data->cfg = device_get_match_data(dev); mutex_init(&data->lock); init_completion(&data->rising); diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c index 661a79ea200d..a284b20529fb 100644 --- a/drivers/iio/proximity/vl53l0x-i2c.c +++ b/drivers/iio/proximity/vl53l0x-i2c.c @@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data, u16 tries = 20; u8 buffer[12]; int ret; + unsigned long time_left; ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1); if (ret < 0) @@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data, if (data->client->irq) { reinit_completion(&data->completion); - ret = wait_for_completion_timeout(&data->completion, HZ/10); - if (ret < 0) - return ret; - else if (ret == 0) + time_left = wait_for_completion_timeout(&data->completion, HZ/10); + if (time_left == 0) return -ETIMEDOUT; vl53l0x_clear_irq(data); diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index 301c3f13fb26..4fc654275155 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -12,11 +12,15 @@ #include <linux/iio/iio.h> #include <linux/interrupt.h> #include <linux/list.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_gpio.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/spi/spi.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> + /* register map */ #define LTC2983_STATUS_REG 0x0000 #define LTC2983_TEMP_RES_START_REG 0x0010 @@ -219,7 +223,7 @@ struct ltc2983_sensor { struct ltc2983_custom_sensor { /* raw table sensor data */ - u8 *table; + void *table; size_t size; /* address offset */ s8 offset; @@ -377,25 +381,25 @@ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st, return regmap_bulk_write(st->regmap, reg, custom->table, custom->size); } -static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new( - struct ltc2983_data *st, - const struct device_node *np, - const char *propname, - const bool is_steinhart, - const u32 resolution, - const bool has_signed) +static struct ltc2983_custom_sensor * +__ltc2983_custom_sensor_new(struct ltc2983_data *st, const struct fwnode_handle *fn, + const char *propname, const bool is_steinhart, + const u32 resolution, const bool has_signed) { struct ltc2983_custom_sensor *new_custom; - u8 index, n_entries, tbl = 0; struct device *dev = &st->spi->dev; /* * For custom steinhart, the full u32 is taken. For all the others * the MSB is discarded. */ const u8 n_size = is_steinhart ? 4 : 3; - const u8 e_size = is_steinhart ? 
sizeof(u32) : sizeof(u64); + u8 index, n_entries; + int ret; - n_entries = of_property_count_elems_of_size(np, propname, e_size); + if (is_steinhart) + n_entries = fwnode_property_count_u32(fn, propname); + else + n_entries = fwnode_property_count_u64(fn, propname); /* n_entries must be an even number */ if (!n_entries || (n_entries % 2) != 0) { dev_err(dev, "Number of entries either 0 or not even\n"); @@ -409,8 +413,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new( new_custom->size = n_entries * n_size; /* check Steinhart size */ if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) { - dev_err(dev, "Steinhart sensors size(%zu) must be 24", - new_custom->size); + dev_err(dev, "Steinhart sensors size(%zu) must be %u\n", new_custom->size, + LTC2983_CUSTOM_STEINHART_SIZE); return ERR_PTR(-EINVAL); } /* Check space on the table. */ @@ -423,21 +427,33 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new( } /* allocate the table */ - new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL); + if (is_steinhart) + new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u32), GFP_KERNEL); + else + new_custom->table = devm_kcalloc(dev, n_entries, sizeof(u64), GFP_KERNEL); if (!new_custom->table) return ERR_PTR(-ENOMEM); - for (index = 0; index < n_entries; index++) { - u64 temp = 0, j; - /* - * Steinhart sensors are configured with raw values in the - * devicetree. For the other sensors we must convert the - * value to raw. The odd index's correspond to temperarures - * and always have 1/1024 of resolution. Temperatures also - * come in kelvin, so signed values is not possible - */ - if (!is_steinhart) { - of_property_read_u64_index(np, propname, index, &temp); + /* + * Steinhart sensors are configured with raw values in the firmware + * node. For the other sensors we must convert the value to raw. + * The odd indexes correspond to temperatures and always have 1/1024 + * of resolution. Temperatures also come in Kelvin, so signed values + * are not possible. 
+ */ + if (is_steinhart) { + ret = fwnode_property_read_u32_array(fn, propname, new_custom->table, n_entries); + if (ret < 0) + return ERR_PTR(ret); + + cpu_to_be32_array(new_custom->table, new_custom->table, n_entries); + } else { + ret = fwnode_property_read_u64_array(fn, propname, new_custom->table, n_entries); + if (ret < 0) + return ERR_PTR(ret); + + for (index = 0; index < n_entries; index++) { + u64 temp = ((u64 *)new_custom->table)[index]; if ((index % 2) != 0) temp = __convert_to_raw(temp, 1024); @@ -445,16 +461,9 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new( temp = __convert_to_raw_sign(temp, resolution); else temp = __convert_to_raw(temp, resolution); - } else { - u32 t32; - of_property_read_u32_index(np, propname, index, &t32); - temp = t32; + put_unaligned_be24(temp, new_custom->table + index * 3); } - - for (j = 0; j < n_size; j++) - new_custom->table[tbl++] = - temp >> (8 * (n_size - j - 1)); } new_custom->is_steinhart = is_steinhart; @@ -597,13 +606,12 @@ static int ltc2983_adc_assign_chan(struct ltc2983_data *st, return __ltc2983_chan_assign_common(st, sensor, chan_val); } -static struct ltc2983_sensor *ltc2983_thermocouple_new( - const struct device_node *child, - struct ltc2983_data *st, - const struct ltc2983_sensor *sensor) +static struct ltc2983_sensor * +ltc2983_thermocouple_new(const struct fwnode_handle *child, struct ltc2983_data *st, + const struct ltc2983_sensor *sensor) { struct ltc2983_thermocouple *thermo; - struct device_node *phandle; + struct fwnode_handle *ref; u32 oc_current; int ret; @@ -611,11 +619,10 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new( if (!thermo) return ERR_PTR(-ENOMEM); - if (of_property_read_bool(child, "adi,single-ended")) + if (fwnode_property_read_bool(child, "adi,single-ended")) thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1); - ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp", - &oc_current); + ret = fwnode_property_read_u32(child, "adi,sensor-oc-current-microamp", &oc_current); if (!ret) { switch (oc_current) { case 10: @@ -651,20 +658,18 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new( return ERR_PTR(-EINVAL); } - phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0); - if (phandle) { - int ret; - - ret = of_property_read_u32(phandle, "reg", - &thermo->cold_junction_chan); + ref = fwnode_find_reference(child, "adi,cold-junction-handle", 0); + if (IS_ERR(ref)) { + ref = NULL; + } else { + ret = fwnode_property_read_u32(ref, "reg", &thermo->cold_junction_chan); if (ret) { /* * This would be caught later but we can just return * the error right away. 
*/ dev_err(&st->spi->dev, "Property reg must be given\n"); - of_node_put(phandle); - return ERR_PTR(-EINVAL); + goto fail; } } @@ -676,8 +681,8 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new( propname, false, 16384, true); if (IS_ERR(thermo->custom)) { - of_node_put(phandle); - return ERR_CAST(thermo->custom); + ret = PTR_ERR(thermo->custom); + goto fail; } } @@ -685,37 +690,41 @@ static struct ltc2983_sensor *ltc2983_thermocouple_new( thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler; thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan; - of_node_put(phandle); + fwnode_handle_put(ref); return &thermo->sensor; + +fail: + fwnode_handle_put(ref); + return ERR_PTR(ret); } -static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child, - struct ltc2983_data *st, - const struct ltc2983_sensor *sensor) +static struct ltc2983_sensor * +ltc2983_rtd_new(const struct fwnode_handle *child, struct ltc2983_data *st, + const struct ltc2983_sensor *sensor) { struct ltc2983_rtd *rtd; int ret = 0; struct device *dev = &st->spi->dev; - struct device_node *phandle; + struct fwnode_handle *ref; u32 excitation_current = 0, n_wires = 0; rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL); if (!rtd) return ERR_PTR(-ENOMEM); - phandle = of_parse_phandle(child, "adi,rsense-handle", 0); - if (!phandle) { + ref = fwnode_find_reference(child, "adi,rsense-handle", 0); + if (IS_ERR(ref)) { dev_err(dev, "Property adi,rsense-handle missing or invalid"); - return ERR_PTR(-EINVAL); + return ERR_CAST(ref); } - ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan); + ret = fwnode_property_read_u32(ref, "reg", &rtd->r_sense_chan); if (ret) { dev_err(dev, "Property reg must be given\n"); goto fail; } - ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires); + ret = fwnode_property_read_u32(child, "adi,number-of-wires", &n_wires); if (!ret) { switch (n_wires) { case 2: @@ -738,9 +747,9 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child, } } - if (of_property_read_bool(child, "adi,rsense-share")) { + if (fwnode_property_read_bool(child, "adi,rsense-share")) { /* Current rotation is only available with rsense sharing */ - if (of_property_read_bool(child, "adi,current-rotate")) { + if (fwnode_property_read_bool(child, "adi,current-rotate")) { if (n_wires == 2 || n_wires == 3) { dev_err(dev, "Rotation not allowed for 2/3 Wire RTDs"); @@ -803,8 +812,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child, "adi,custom-rtd", false, 2048, false); if (IS_ERR(rtd->custom)) { - of_node_put(phandle); - return ERR_CAST(rtd->custom); + ret = PTR_ERR(rtd->custom); + goto fail; } } @@ -812,8 +821,8 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child, rtd->sensor.fault_handler = ltc2983_common_fault_handler; rtd->sensor.assign_chan = ltc2983_rtd_assign_chan; - ret = of_property_read_u32(child, "adi,excitation-current-microamp", - &excitation_current); + ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp", + &excitation_current); if (ret) { /* default to 5uA */ rtd->excitation_current = 1; @@ -852,23 +861,22 @@ static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child, } } - of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve); + fwnode_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve); - of_node_put(phandle); + fwnode_handle_put(ref); return &rtd->sensor; fail: - of_node_put(phandle); + fwnode_handle_put(ref); return ERR_PTR(ret); } 
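The rtd and thermocouple conversions above hinge on a convention change: of_parse_phandle() returned NULL on failure, while fwnode_find_reference() returns an ERR_PTR(), and a successful lookup must be balanced with fwnode_handle_put(). A minimal sketch of the resulting lookup shape follows; the APIs are real (linux/property.h), the wrapper function itself is illustrative.

static int example_read_referenced_reg(struct fwnode_handle *child,
                                       const char *prop, u32 *reg)
{
        struct fwnode_handle *ref;
        int ret;

        ref = fwnode_find_reference(child, prop, 0);
        if (IS_ERR(ref))        /* ERR_PTR(), not NULL, signals failure */
                return PTR_ERR(ref);

        ret = fwnode_property_read_u32(ref, "reg", reg);
        fwnode_handle_put(ref); /* drop the reference in all paths */
        return ret;
}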
-static struct ltc2983_sensor *ltc2983_thermistor_new( - const struct device_node *child, - struct ltc2983_data *st, - const struct ltc2983_sensor *sensor) +static struct ltc2983_sensor * +ltc2983_thermistor_new(const struct fwnode_handle *child, struct ltc2983_data *st, + const struct ltc2983_sensor *sensor) { struct ltc2983_thermistor *thermistor; struct device *dev = &st->spi->dev; - struct device_node *phandle; + struct fwnode_handle *ref; u32 excitation_current = 0; int ret = 0; @@ -876,23 +884,23 @@ static struct ltc2983_sensor *ltc2983_thermistor_new( if (!thermistor) return ERR_PTR(-ENOMEM); - phandle = of_parse_phandle(child, "adi,rsense-handle", 0); - if (!phandle) { + ref = fwnode_find_reference(child, "adi,rsense-handle", 0); + if (IS_ERR(ref)) { dev_err(dev, "Property adi,rsense-handle missing or invalid"); - return ERR_PTR(-EINVAL); + return ERR_CAST(ref); } - ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan); + ret = fwnode_property_read_u32(ref, "reg", &thermistor->r_sense_chan); if (ret) { dev_err(dev, "rsense channel must be configured...\n"); goto fail; } - if (of_property_read_bool(child, "adi,single-ended")) { + if (fwnode_property_read_bool(child, "adi,single-ended")) { thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1); - } else if (of_property_read_bool(child, "adi,rsense-share")) { + } else if (fwnode_property_read_bool(child, "adi,rsense-share")) { /* rotation is only possible if sharing rsense */ - if (of_property_read_bool(child, "adi,current-rotate")) + if (fwnode_property_read_bool(child, "adi,current-rotate")) thermistor->sensor_config = LTC2983_THERMISTOR_C_ROTATE(1); else @@ -926,16 +934,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new( steinhart, 64, false); if (IS_ERR(thermistor->custom)) { - of_node_put(phandle); - return ERR_CAST(thermistor->custom); + ret = PTR_ERR(thermistor->custom); + goto fail; } } /* set common parameters */ thermistor->sensor.fault_handler = ltc2983_common_fault_handler; thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan; - ret = of_property_read_u32(child, "adi,excitation-current-nanoamp", - &excitation_current); + ret = fwnode_property_read_u32(child, "adi,excitation-current-nanoamp", + &excitation_current); if (ret) { /* Auto range is not allowed for custom sensors */ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART) @@ -999,17 +1007,16 @@ static struct ltc2983_sensor *ltc2983_thermistor_new( } } - of_node_put(phandle); + fwnode_handle_put(ref); return &thermistor->sensor; fail: - of_node_put(phandle); + fwnode_handle_put(ref); return ERR_PTR(ret); } -static struct ltc2983_sensor *ltc2983_diode_new( - const struct device_node *child, - const struct ltc2983_data *st, - const struct ltc2983_sensor *sensor) +static struct ltc2983_sensor * +ltc2983_diode_new(const struct fwnode_handle *child, const struct ltc2983_data *st, + const struct ltc2983_sensor *sensor) { struct ltc2983_diode *diode; u32 temp = 0, excitation_current = 0; @@ -1019,13 +1026,13 @@ static struct ltc2983_sensor *ltc2983_diode_new( if (!diode) return ERR_PTR(-ENOMEM); - if (of_property_read_bool(child, "adi,single-ended")) + if (fwnode_property_read_bool(child, "adi,single-ended")) diode->sensor_config = LTC2983_DIODE_SGL(1); - if (of_property_read_bool(child, "adi,three-conversion-cycles")) + if (fwnode_property_read_bool(child, "adi,three-conversion-cycles")) diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1); - if (of_property_read_bool(child, "adi,average-on")) + if (fwnode_property_read_bool(child, 
"adi,average-on")) diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1); /* validate channel index */ @@ -1040,8 +1047,8 @@ static struct ltc2983_sensor *ltc2983_diode_new( diode->sensor.fault_handler = ltc2983_common_fault_handler; diode->sensor.assign_chan = ltc2983_diode_assign_chan; - ret = of_property_read_u32(child, "adi,excitation-current-microamp", - &excitation_current); + ret = fwnode_property_read_u32(child, "adi,excitation-current-microamp", + &excitation_current); if (!ret) { switch (excitation_current) { case 10: @@ -1064,7 +1071,7 @@ static struct ltc2983_sensor *ltc2983_diode_new( } } - of_property_read_u32(child, "adi,ideal-factor-value", &temp); + fwnode_property_read_u32(child, "adi,ideal-factor-value", &temp); /* 2^20 resolution */ diode->ideal_factor_value = __convert_to_raw(temp, 1048576); @@ -1072,7 +1079,7 @@ static struct ltc2983_sensor *ltc2983_diode_new( return &diode->sensor; } -static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child, +static struct ltc2983_sensor *ltc2983_r_sense_new(struct fwnode_handle *child, struct ltc2983_data *st, const struct ltc2983_sensor *sensor) { @@ -1091,7 +1098,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child, return ERR_PTR(-EINVAL); } - ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp); + ret = fwnode_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp); if (ret) { dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n"); return ERR_PTR(-EINVAL); @@ -1110,7 +1117,7 @@ static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child, return &rsense->sensor; } -static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child, +static struct ltc2983_sensor *ltc2983_adc_new(struct fwnode_handle *child, struct ltc2983_data *st, const struct ltc2983_sensor *sensor) { @@ -1120,7 +1127,7 @@ static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child, if (!adc) return ERR_PTR(-ENOMEM); - if (of_property_read_bool(child, "adi,single-ended")) + if (fwnode_property_read_bool(child, "adi,single-ended")) adc->single_ended = true; if (!adc->single_ended && @@ -1264,17 +1271,15 @@ static irqreturn_t ltc2983_irq_handler(int irq, void *data) static int ltc2983_parse_dt(struct ltc2983_data *st) { - struct device_node *child; struct device *dev = &st->spi->dev; + struct fwnode_handle *child; int ret = 0, chan = 0, channel_avail_mask = 0; - of_property_read_u32(dev->of_node, "adi,mux-delay-config-us", - &st->mux_delay_config); + device_property_read_u32(dev, "adi,mux-delay-config-us", &st->mux_delay_config); - of_property_read_u32(dev->of_node, "adi,filter-notch-freq", - &st->filter_notch_freq); + device_property_read_u32(dev, "adi,filter-notch-freq", &st->filter_notch_freq); - st->num_channels = of_get_available_child_count(dev->of_node); + st->num_channels = device_get_child_node_count(dev); if (!st->num_channels) { dev_err(&st->spi->dev, "At least one channel must be given!"); return -EINVAL; @@ -1286,10 +1291,10 @@ static int ltc2983_parse_dt(struct ltc2983_data *st) return -ENOMEM; st->iio_channels = st->num_channels; - for_each_available_child_of_node(dev->of_node, child) { + device_for_each_child_node(dev, child) { struct ltc2983_sensor sensor; - ret = of_property_read_u32(child, "reg", &sensor.chan); + ret = fwnode_property_read_u32(child, "reg", &sensor.chan); if (ret) { dev_err(dev, "reg property must given for child nodes\n"); goto put_child; @@ -1299,8 +1304,8 @@ static int ltc2983_parse_dt(struct ltc2983_data *st) 
if (sensor.chan < LTC2983_MIN_CHANNELS_NR || sensor.chan > LTC2983_MAX_CHANNELS_NR) { ret = -EINVAL; - dev_err(dev, - "chan:%d must be from 1 to 20\n", sensor.chan); + dev_err(dev, "chan:%d must be from %u to %u\n", sensor.chan, + LTC2983_MIN_CHANNELS_NR, LTC2983_MAX_CHANNELS_NR); goto put_child; } else if (channel_avail_mask & BIT(sensor.chan)) { ret = -EINVAL; @@ -1308,8 +1313,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st) goto put_child; } - ret = of_property_read_u32(child, "adi,sensor-type", - &sensor.type); + ret = fwnode_property_read_u32(child, "adi,sensor-type", &sensor.type); if (ret) { dev_err(dev, "adi,sensor-type property must given for child nodes\n"); @@ -1363,7 +1367,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st) return 0; put_child: - of_node_put(child); + fwnode_handle_put(child); return ret; } diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c index 54840881259a..8307aae2cb45 100644 --- a/drivers/iio/temperature/max31856.c +++ b/drivers/iio/temperature/max31856.c @@ -7,9 +7,11 @@ */ #include <linux/ctype.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> +#include <linux/property.h> #include <linux/spi/spi.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -422,9 +424,7 @@ static int max31856_probe(struct spi_device *spi) indio_dev->channels = max31856_channels; indio_dev->num_channels = ARRAY_SIZE(max31856_channels); - ret = of_property_read_u32(spi->dev.of_node, "thermocouple-type", - &data->thermocouple_type); - + ret = device_property_read_u32(&spi->dev, "thermocouple-type", &data->thermocouple_type); if (ret) { dev_info(&spi->dev, "Could not read thermocouple type DT property, configuring as a K-Type\n"); diff --git a/drivers/iio/temperature/max31865.c b/drivers/iio/temperature/max31865.c index 86c3f3509a26..e3bb78184c6e 100644 --- a/drivers/iio/temperature/max31865.c +++ b/drivers/iio/temperature/max31865.c @@ -12,9 +12,11 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> +#include <linux/property.h> #include <linux/spi/spi.h> #include <asm/unaligned.h> @@ -305,7 +307,7 @@ static int max31865_probe(struct spi_device *spi) indio_dev->channels = max31865_channels; indio_dev->num_channels = ARRAY_SIZE(max31865_channels); - if (of_property_read_bool(spi->dev.of_node, "maxim,3-wire")) { + if (device_property_read_bool(&spi->dev, "maxim,3-wire")) { /* select 3 wire */ data->three_wire = 1; } else { diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index 2a4b75897910..f1a8704e6cc1 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -176,16 +176,15 @@ out1: static int iio_sysfs_trigger_remove(int id) { - bool foundit = false; - struct iio_sysfs_trig *t; + struct iio_sysfs_trig *t = NULL, *iter; mutex_lock(&iio_sysfs_trig_list_mut); - list_for_each_entry(t, &iio_sysfs_trig_list, l) - if (id == t->id) { - foundit = true; + list_for_each_entry(iter, &iio_sysfs_trig_list, l) + if (id == iter->id) { + t = iter; break; } - if (!foundit) { + if (!t) { mutex_unlock(&iio_sysfs_trig_list_mut); return -EINVAL; } diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index 91353e651a52..22adff5d7f53 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -110,6 +110,15 @@ 
config INTERCONNECT_QCOM_SC8180X This is a driver for the Qualcomm Network-on-Chip on sc8180x-based platforms. +config INTERCONNECT_QCOM_SC8280XP + tristate "Qualcomm SC8280XP interconnect driver" + depends on INTERCONNECT_QCOM_RPMH_POSSIBLE + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_BCM_VOTER + help + This is a driver for the Qualcomm Network-on-Chip on SC8280XP-based + platforms. + config INTERCONNECT_QCOM_SDM660 tristate "Qualcomm SDM660 interconnect driver" depends on INTERCONNECT_QCOM @@ -137,6 +146,15 @@ config INTERCONNECT_QCOM_SDX55 This is a driver for the Qualcomm Network-on-Chip on sdx55-based platforms. +config INTERCONNECT_QCOM_SDX65 + tristate "Qualcomm SDX65 interconnect driver" + depends on INTERCONNECT_QCOM_RPMH_POSSIBLE + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_BCM_VOTER + help + This is a driver for the Qualcomm Network-on-Chip on sdx65-based + platforms. + config INTERCONNECT_QCOM_SM8150 tristate "Qualcomm SM8150 interconnect driver" depends on INTERCONNECT_QCOM_RPMH_POSSIBLE diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index ceae9bb566c6..8d1fe9d38ac3 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -12,9 +12,11 @@ icc-rpmh-obj := icc-rpmh.o qnoc-sc7180-objs := sc7180.o qnoc-sc7280-objs := sc7280.o qnoc-sc8180x-objs := sc8180x.o +qnoc-sc8280xp-objs := sc8280xp.o qnoc-sdm660-objs := sdm660.o qnoc-sdm845-objs := sdm845.o qnoc-sdx55-objs := sdx55.o +qnoc-sdx65-objs := sdx65.o qnoc-sm8150-objs := sm8150.o qnoc-sm8250-objs := sm8250.o qnoc-sm8350-objs := sm8350.o @@ -33,9 +35,11 @@ obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o obj-$(CONFIG_INTERCONNECT_QCOM_SC8180X) += qnoc-sc8180x.o +obj-$(CONFIG_INTERCONNECT_QCOM_SC8280XP) += qnoc-sc8280xp.o obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o +obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c index 34125e8f8b60..fb013191c29b 100644 --- a/drivers/interconnect/qcom/icc-rpm.c +++ b/drivers/interconnect/qcom/icc-rpm.c @@ -274,20 +274,19 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst) do_div(rate, qn->buswidth); rate = min_t(u64, rate, LONG_MAX); - if (qn->rate == rate) - return 0; - for (i = 0; i < qp->num_clks; i++) { + if (qp->bus_clk_rate[i] == rate) + continue; + ret = clk_set_rate(qp->bus_clks[i].clk, rate); if (ret) { pr_err("%s clk_set_rate error: %d\n", qp->bus_clks[i].id, ret); return ret; } + qp->bus_clk_rate[i] = rate; } - qn->rate = rate; - return 0; } @@ -301,7 +300,7 @@ int qnoc_probe(struct platform_device *pdev) const struct qcom_icc_desc *desc; struct icc_onecell_data *data; struct icc_provider *provider; - struct qcom_icc_node **qnodes; + struct qcom_icc_node * const *qnodes; struct qcom_icc_provider *qp; struct icc_node *node; size_t num_nodes, i; @@ -332,6 +331,11 @@ int qnoc_probe(struct platform_device *pdev) if (!qp) return -ENOMEM; + qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate), + GFP_KERNEL); + if (!qp->bus_clk_rate) + return -ENOMEM; + data = 
devm_kzalloc(dev, struct_size(data, nodes, num_nodes), GFP_KERNEL); if (!data) diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h index 26dad006034f..ebee9009301e 100644 --- a/drivers/interconnect/qcom/icc-rpm.h +++ b/drivers/interconnect/qcom/icc-rpm.h @@ -26,6 +26,7 @@ enum qcom_icc_type { * @type: the ICC provider type * @qos_offset: offset to QoS registers * @regmap: regmap for QoS registers read/write access + * @bus_clk_rate: bus clock rate in Hz */ struct qcom_icc_provider { struct icc_provider provider; @@ -33,6 +34,7 @@ struct qcom_icc_provider { enum qcom_icc_type type; struct regmap *regmap; unsigned int qos_offset; + u64 *bus_clk_rate; struct clk_bulk_data bus_clks[]; }; @@ -66,7 +68,6 @@ struct qcom_icc_qos { * @mas_rpm_id: RPM id for devices that are bus masters * @slv_rpm_id: RPM id for devices that are bus slaves * @qos: NoC QoS setting parameters - * @rate: current bus clock rate in Hz */ struct qcom_icc_node { unsigned char *name; @@ -77,11 +78,10 @@ struct qcom_icc_node { int mas_rpm_id; int slv_rpm_id; struct qcom_icc_qos qos; - u64 rate; }; struct qcom_icc_desc { - struct qcom_icc_node **nodes; + struct qcom_icc_node * const *nodes; size_t num_nodes; const char * const *clocks; size_t num_clocks; diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index 2c8e12549804..3c40076eb5fb 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -189,7 +189,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct icc_onecell_data *data; struct icc_provider *provider; - struct qcom_icc_node **qnodes, *qn; + struct qcom_icc_node * const *qnodes, *qn; struct qcom_icc_provider *qp; struct icc_node *node; size_t num_nodes, i, j; diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h index 4bfc060529ba..d29929461c17 100644 --- a/drivers/interconnect/qcom/icc-rpmh.h +++ b/drivers/interconnect/qcom/icc-rpmh.h @@ -22,7 +22,7 @@ struct qcom_icc_provider { struct icc_provider provider; struct device *dev; - struct qcom_icc_bcm **bcms; + struct qcom_icc_bcm * const *bcms; size_t num_bcms; struct bcm_voter *voter; }; @@ -112,9 +112,9 @@ struct qcom_icc_fabric { }; struct qcom_icc_desc { - struct qcom_icc_node **nodes; + struct qcom_icc_node * const *nodes; size_t num_nodes; - struct qcom_icc_bcm **bcms; + struct qcom_icc_bcm * const *bcms; size_t num_bcms; }; diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c index 2f397a7c3322..5c4ba2f37c8e 100644 --- a/drivers/interconnect/qcom/msm8916.c +++ b/drivers/interconnect/qcom/msm8916.c @@ -1191,7 +1191,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = { .links = snoc_pcnoc_slv_links, }; -static struct qcom_icc_node *msm8916_snoc_nodes[] = { +static struct qcom_icc_node * const msm8916_snoc_nodes[] = { [BIMC_SNOC_SLV] = &bimc_snoc_slv, [MASTER_JPEG] = &mas_jpeg, [MASTER_MDP_PORT0] = &mas_mdp, @@ -1228,7 +1228,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc msm8916_snoc = { +static const struct qcom_icc_desc msm8916_snoc = { .type = QCOM_ICC_NOC, .nodes = msm8916_snoc_nodes, .num_nodes = ARRAY_SIZE(msm8916_snoc_nodes), @@ -1236,7 +1236,7 @@ static struct qcom_icc_desc msm8916_snoc = { .qos_offset = 0x7000, }; -static struct qcom_icc_node *msm8916_bimc_nodes[] = { +static struct qcom_icc_node * const msm8916_bimc_nodes[] = { [BIMC_SNOC_MAS] = &bimc_snoc_mas, 
[MASTER_AMPSS_M0] = &mas_apss, [MASTER_GRAPHICS_3D] = &mas_gfx, @@ -1256,7 +1256,7 @@ static const struct regmap_config msm8916_bimc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc msm8916_bimc = { +static const struct qcom_icc_desc msm8916_bimc = { .type = QCOM_ICC_BIMC, .nodes = msm8916_bimc_nodes, .num_nodes = ARRAY_SIZE(msm8916_bimc_nodes), @@ -1264,7 +1264,7 @@ static struct qcom_icc_desc msm8916_bimc = { .qos_offset = 0x8000, }; -static struct qcom_icc_node *msm8916_pcnoc_nodes[] = { +static struct qcom_icc_node * const msm8916_pcnoc_nodes[] = { [MASTER_BLSP_1] = &mas_blsp_1, [MASTER_DEHR] = &mas_dehr, [MASTER_LPASS] = &mas_audio, @@ -1325,7 +1325,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc msm8916_pcnoc = { +static const struct qcom_icc_desc msm8916_pcnoc = { .type = QCOM_ICC_NOC, .nodes = msm8916_pcnoc_nodes, .num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes), diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c index f9c2d7d3100d..63b31deea722 100644 --- a/drivers/interconnect/qcom/msm8939.c +++ b/drivers/interconnect/qcom/msm8939.c @@ -1251,7 +1251,7 @@ static struct qcom_icc_node snoc_pcnoc_slv = { .links = snoc_pcnoc_slv_links, }; -static struct qcom_icc_node *msm8939_snoc_nodes[] = { +static struct qcom_icc_node * const msm8939_snoc_nodes[] = { [BIMC_SNOC_SLV] = &bimc_snoc_slv, [MASTER_QDSS_BAM] = &mas_qdss_bam, [MASTER_QDSS_ETR] = &mas_qdss_etr, @@ -1281,7 +1281,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc msm8939_snoc = { +static const struct qcom_icc_desc msm8939_snoc = { .type = QCOM_ICC_NOC, .nodes = msm8939_snoc_nodes, .num_nodes = ARRAY_SIZE(msm8939_snoc_nodes), @@ -1289,7 +1289,7 @@ static struct qcom_icc_desc msm8939_snoc = { .qos_offset = 0x7000, }; -static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = { +static struct qcom_icc_node * const msm8939_snoc_mm_nodes[] = { [MASTER_VIDEO_P0] = &mas_video, [MASTER_JPEG] = &mas_jpeg, [MASTER_VFE] = &mas_vfe, @@ -1301,7 +1301,7 @@ static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = { [SNOC_MM_INT_2] = &mm_int_2, }; -static struct qcom_icc_desc msm8939_snoc_mm = { +static const struct qcom_icc_desc msm8939_snoc_mm = { .type = QCOM_ICC_NOC, .nodes = msm8939_snoc_mm_nodes, .num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes), @@ -1309,7 +1309,7 @@ static struct qcom_icc_desc msm8939_snoc_mm = { .qos_offset = 0x7000, }; -static struct qcom_icc_node *msm8939_bimc_nodes[] = { +static struct qcom_icc_node * const msm8939_bimc_nodes[] = { [BIMC_SNOC_MAS] = &bimc_snoc_mas, [MASTER_AMPSS_M0] = &mas_apss, [MASTER_GRAPHICS_3D] = &mas_gfx, @@ -1329,7 +1329,7 @@ static const struct regmap_config msm8939_bimc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc msm8939_bimc = { +static const struct qcom_icc_desc msm8939_bimc = { .type = QCOM_ICC_BIMC, .nodes = msm8939_bimc_nodes, .num_nodes = ARRAY_SIZE(msm8939_bimc_nodes), @@ -1337,7 +1337,7 @@ static struct qcom_icc_desc msm8939_bimc = { .qos_offset = 0x8000, }; -static struct qcom_icc_node *msm8939_pcnoc_nodes[] = { +static struct qcom_icc_node * const msm8939_pcnoc_nodes[] = { [MASTER_BLSP_1] = &mas_blsp_1, [MASTER_DEHR] = &mas_dehr, [MASTER_LPASS] = &mas_audio, @@ -1400,7 +1400,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc msm8939_pcnoc = { +static const struct qcom_icc_desc msm8939_pcnoc = { .type = 
QCOM_ICC_NOC, .nodes = msm8939_pcnoc_nodes, .num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes), diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c index da68ce375a89..6fa0ad90fc3d 100644 --- a/drivers/interconnect/qcom/msm8974.c +++ b/drivers/interconnect/qcom/msm8974.c @@ -220,7 +220,7 @@ struct msm8974_icc_node { }; struct msm8974_icc_desc { - struct msm8974_icc_node **nodes; + struct msm8974_icc_node * const *nodes; size_t num_nodes; }; @@ -244,7 +244,7 @@ DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC, DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0); DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1); -static struct msm8974_icc_node *msm8974_bimc_nodes[] = { +static struct msm8974_icc_node * const msm8974_bimc_nodes[] = { [BIMC_MAS_AMPSS_M0] = &mas_ampss_m0, [BIMC_MAS_AMPSS_M1] = &mas_ampss_m1, [BIMC_MAS_MSS_PROC] = &mas_mss_proc, @@ -254,7 +254,7 @@ static struct msm8974_icc_node *msm8974_bimc_nodes[] = { [BIMC_SLV_AMPSS_L2] = &slv_ampss_l2, }; -static struct msm8974_icc_desc msm8974_bimc = { +static const struct msm8974_icc_desc msm8974_bimc = { .nodes = msm8974_bimc_nodes, .num_nodes = ARRAY_SIZE(msm8974_bimc_nodes), }; @@ -297,7 +297,7 @@ DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73); DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74); DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76); -static struct msm8974_icc_node *msm8974_cnoc_nodes[] = { +static struct msm8974_icc_node * const msm8974_cnoc_nodes[] = { [CNOC_MAS_RPM_INST] = &mas_rpm_inst, [CNOC_MAS_RPM_DATA] = &mas_rpm_data, [CNOC_MAS_RPM_SYS] = &mas_rpm_sys, @@ -337,7 +337,7 @@ static struct msm8974_icc_node *msm8974_cnoc_nodes[] = { [CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc, }; -static struct msm8974_icc_desc msm8974_cnoc = { +static const struct msm8974_icc_desc msm8974_cnoc = { .nodes = msm8974_cnoc_nodes, .num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes), }; @@ -365,7 +365,7 @@ DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14); DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15); DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17); -static struct msm8974_icc_node *msm8974_mnoc_nodes[] = { +static struct msm8974_icc_node * const msm8974_mnoc_nodes[] = { [MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d, [MNOC_MAS_JPEG] = &mas_jpeg, [MNOC_MAS_MDP_PORT0] = &mas_mdp_port0, @@ -390,7 +390,7 @@ static struct msm8974_icc_node *msm8974_mnoc_nodes[] = { [MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc, }; -static struct msm8974_icc_desc msm8974_mnoc = { +static const struct msm8974_icc_desc msm8974_mnoc = { .nodes = msm8974_mnoc_nodes, .num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes), }; @@ -410,7 +410,7 @@ DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MS DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80); DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC); -static struct msm8974_icc_node *msm8974_onoc_nodes[] = { +static struct msm8974_icc_node * const msm8974_onoc_nodes[] = { [OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc, [OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem, [OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem, @@ -425,7 +425,7 @@ static struct msm8974_icc_node *msm8974_onoc_nodes[] = { [OCMEM_SLV_OCMEM] = &slv_ocmem, }; -static struct msm8974_icc_desc msm8974_onoc = { +static const struct msm8974_icc_desc msm8974_onoc = { .nodes = 
msm8974_onoc_nodes, .num_nodes = ARRAY_SIZE(msm8974_onoc_nodes), }; @@ -458,7 +458,7 @@ DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43); DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC); DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46); -static struct msm8974_icc_node *msm8974_pnoc_nodes[] = { +static struct msm8974_icc_node * const msm8974_pnoc_nodes[] = { [PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg, [PNOC_MAS_SDCC_1] = &mas_sdcc_1, [PNOC_MAS_SDCC_3] = &mas_sdcc_3, @@ -488,7 +488,7 @@ static struct msm8974_icc_node *msm8974_pnoc_nodes[] = { [PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc, }; -static struct msm8974_icc_desc msm8974_pnoc = { +static const struct msm8974_icc_desc msm8974_pnoc = { .nodes = msm8974_pnoc_nodes, .num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes), }; @@ -518,7 +518,7 @@ DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27); DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29); DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30); -static struct msm8974_icc_node *msm8974_snoc_nodes[] = { +static struct msm8974_icc_node * const msm8974_snoc_nodes[] = { [SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb, [SNOC_MAS_QDSS_BAM] = &mas_qdss_bam, [SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg, @@ -545,7 +545,7 @@ static struct msm8974_icc_node *msm8974_snoc_nodes[] = { [SNOC_SLV_QDSS_STM] = &slv_qdss_stm, }; -static struct msm8974_icc_desc msm8974_snoc = { +static const struct msm8974_icc_desc msm8974_snoc = { .nodes = msm8974_snoc_nodes, .num_nodes = ARRAY_SIZE(msm8974_snoc_nodes), }; @@ -648,7 +648,7 @@ static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak) static int msm8974_icc_probe(struct platform_device *pdev) { const struct msm8974_icc_desc *desc; - struct msm8974_icc_node **qnodes; + struct msm8974_icc_node * const *qnodes; struct msm8974_icc_provider *qp; struct device *dev = &pdev->dev; struct icc_onecell_data *data; diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c index 499e11fbbd2e..c2903ae3b3bc 100644 --- a/drivers/interconnect/qcom/msm8996.c +++ b/drivers/interconnect/qcom/msm8996.c @@ -1796,7 +1796,7 @@ static struct qcom_icc_node slv_srvc_snoc = { .qos.qos_mode = NOC_QOS_MODE_INVALID }; -static struct qcom_icc_node *a0noc_nodes[] = { +static struct qcom_icc_node * const a0noc_nodes[] = { [MASTER_PCIE_0] = &mas_pcie_0, [MASTER_PCIE_1] = &mas_pcie_1, [MASTER_PCIE_2] = &mas_pcie_2 @@ -1820,7 +1820,7 @@ static const struct qcom_icc_desc msm8996_a0noc = { .regmap_cfg = &msm8996_a0noc_regmap_config }; -static struct qcom_icc_node *a1noc_nodes[] = { +static struct qcom_icc_node * const a1noc_nodes[] = { [MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc, [MASTER_CRYPTO_CORE0] = &mas_crypto_c0, [MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc @@ -1841,7 +1841,7 @@ static const struct qcom_icc_desc msm8996_a1noc = { .regmap_cfg = &msm8996_a1noc_regmap_config }; -static struct qcom_icc_node *a2noc_nodes[] = { +static struct qcom_icc_node * const a2noc_nodes[] = { [MASTER_USB3] = &mas_usb3, [MASTER_IPA] = &mas_ipa, [MASTER_UFS] = &mas_ufs @@ -1862,7 +1862,7 @@ static const struct qcom_icc_desc msm8996_a2noc = { .regmap_cfg = &msm8996_a2noc_regmap_config }; -static struct qcom_icc_node *bimc_nodes[] = { +static struct qcom_icc_node * const bimc_nodes[] = { [MASTER_AMPSS_M0] = &mas_apps_proc, [MASTER_GRAPHICS_3D] = &mas_oxili, [MASTER_MNOC_BIMC] = &mas_mnoc_bimc, @@ -1888,7 +1888,7 @@ static const struct qcom_icc_desc msm8996_bimc = { .regmap_cfg = 
&msm8996_bimc_regmap_config }; -static struct qcom_icc_node *cnoc_nodes[] = { +static struct qcom_icc_node * const cnoc_nodes[] = { [MASTER_SNOC_CNOC] = &mas_snoc_cnoc, [MASTER_QDSS_DAP] = &mas_qdss_dap, [SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc, @@ -1946,7 +1946,7 @@ static const struct qcom_icc_desc msm8996_cnoc = { .regmap_cfg = &msm8996_cnoc_regmap_config }; -static struct qcom_icc_node *mnoc_nodes[] = { +static struct qcom_icc_node * const mnoc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg, [MASTER_CPP] = &mas_cpp, [MASTER_JPEG] = &mas_jpeg, @@ -2001,7 +2001,7 @@ static const struct qcom_icc_desc msm8996_mnoc = { .regmap_cfg = &msm8996_mnoc_regmap_config }; -static struct qcom_icc_node *pnoc_nodes[] = { +static struct qcom_icc_node * const pnoc_nodes[] = { [MASTER_SNOC_PNOC] = &mas_snoc_pnoc, [MASTER_SDCC_1] = &mas_sdcc_1, [MASTER_SDCC_2] = &mas_sdcc_2, @@ -2037,7 +2037,7 @@ static const struct qcom_icc_desc msm8996_pnoc = { .regmap_cfg = &msm8996_pnoc_regmap_config }; -static struct qcom_icc_node *snoc_nodes[] = { +static struct qcom_icc_node * const snoc_nodes[] = { [MASTER_HMSS] = &mas_hmss, [MASTER_QDSS_BAM] = &mas_qdss_bam, [MASTER_SNOC_CFG] = &mas_snoc_cfg, diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index eec13099a6a3..4198656f4e59 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -67,7 +67,7 @@ struct qcom_osm_l3_node { }; struct qcom_osm_l3_desc { - const struct qcom_osm_l3_node **nodes; + const struct qcom_osm_l3_node * const *nodes; size_t num_nodes; unsigned int lut_row_size; unsigned int reg_freq_lut; @@ -86,7 +86,7 @@ struct qcom_osm_l3_desc { DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3); DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16); -static const struct qcom_osm_l3_node *sdm845_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node * const sdm845_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3, [SLAVE_OSM_L3] = &sdm845_osm_l3, }; @@ -102,7 +102,7 @@ static const struct qcom_osm_l3_desc sdm845_icc_osm_l3 = { DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3); DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16); -static const struct qcom_osm_l3_node *sc7180_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node * const sc7180_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3, [SLAVE_OSM_L3] = &sc7180_osm_l3, }; @@ -118,7 +118,7 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = { DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3); DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32); -static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = { +static const struct qcom_osm_l3_node * const sc7280_epss_l3_nodes[] = { [MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3, [SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3, }; @@ -134,7 +134,7 @@ static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = { DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3); DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32); -static const struct qcom_osm_l3_node *sc8180x_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node * const sc8180x_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sc8180x_osm_apps_l3, [SLAVE_OSM_L3] = &sc8180x_osm_l3, }; @@ -150,7 +150,7 @@ static const struct qcom_osm_l3_desc sc8180x_icc_osm_l3 = { DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3); 
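A one-line change repeated across the interconnect drivers above and below is "struct qcom_icc_node *nodes[]" becoming "struct qcom_icc_node * const nodes[]". The const binds to the array elements, so the lookup table itself can live in read-only memory while the nodes it points to stay writable for runtime bookkeeping; marking the enclosing qcom_icc_desc const then follows naturally. A standalone sketch of the distinction, using a hypothetical struct node rather than the driver's types:

struct node { int rate; };

static struct node a, b;

/* array of const pointers: the table is immutable, the nodes are not */
static struct node *const table[] = { &a, &b };

static void set_rates(int rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		table[i]->rate = rate;	/* still allowed */

	/* table[0] = &b;  would not compile: the pointer slots are const */
}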
DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32); -static const struct qcom_osm_l3_node *sm8150_osm_l3_nodes[] = { +static const struct qcom_osm_l3_node * const sm8150_osm_l3_nodes[] = { [MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3, [SLAVE_OSM_L3] = &sm8150_osm_l3, }; @@ -166,7 +166,7 @@ static const struct qcom_osm_l3_desc sm8150_icc_osm_l3 = { DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3); DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32); -static const struct qcom_osm_l3_node *sm8250_epss_l3_nodes[] = { +static const struct qcom_osm_l3_node * const sm8250_epss_l3_nodes[] = { [MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3, [SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3, }; @@ -228,7 +228,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) const struct qcom_osm_l3_desc *desc; struct icc_onecell_data *data; struct icc_provider *provider; - const struct qcom_osm_l3_node **qnodes; + const struct qcom_osm_l3_node * const *qnodes; struct icc_node *node; size_t num_nodes; struct clk *clk; diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c index 74404e0b2080..0da612d6398c 100644 --- a/drivers/interconnect/qcom/qcm2290.c +++ b/drivers/interconnect/qcom/qcm2290.c @@ -1174,7 +1174,7 @@ static struct qcom_icc_node slv_anoc_snoc = { }; /* NoC descriptors */ -static struct qcom_icc_node *qcm2290_bimc_nodes[] = { +static struct qcom_icc_node * const qcm2290_bimc_nodes[] = { [MASTER_APPSS_PROC] = &mas_appss_proc, [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt, [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt, @@ -1193,7 +1193,7 @@ static const struct regmap_config qcm2290_bimc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc qcm2290_bimc = { +static const struct qcom_icc_desc qcm2290_bimc = { .type = QCOM_ICC_BIMC, .nodes = qcm2290_bimc_nodes, .num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes), @@ -1202,7 +1202,7 @@ static struct qcom_icc_desc qcm2290_bimc = { .qos_offset = 0x8000, }; -static struct qcom_icc_node *qcm2290_cnoc_nodes[] = { +static struct qcom_icc_node * const qcm2290_cnoc_nodes[] = { [MASTER_SNOC_CNOC] = &mas_snoc_cnoc, [MASTER_QDSS_DAP] = &mas_qdss_dap, [SLAVE_BIMC_CFG] = &slv_bimc_cfg, @@ -1248,14 +1248,14 @@ static const struct regmap_config qcm2290_cnoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc qcm2290_cnoc = { +static const struct qcom_icc_desc qcm2290_cnoc = { .type = QCOM_ICC_NOC, .nodes = qcm2290_cnoc_nodes, .num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes), .regmap_cfg = &qcm2290_cnoc_regmap_config, }; -static struct qcom_icc_node *qcm2290_snoc_nodes[] = { +static struct qcom_icc_node * const qcm2290_snoc_nodes[] = { [MASTER_CRYPTO_CORE0] = &mas_crypto_core0, [MASTER_SNOC_CFG] = &mas_snoc_cfg, [MASTER_TIC] = &mas_tic, @@ -1289,7 +1289,7 @@ static const struct regmap_config qcm2290_snoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc qcm2290_snoc = { +static const struct qcom_icc_desc qcm2290_snoc = { .type = QCOM_ICC_QNOC, .nodes = qcm2290_snoc_nodes, .num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes), @@ -1298,25 +1298,25 @@ static struct qcom_icc_desc qcm2290_snoc = { .qos_offset = 0x15000, }; -static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = { +static struct qcom_icc_node * const qcm2290_qup_virt_nodes[] = { [MASTER_QUP_CORE_0] = &mas_qup_core_0, [SLAVE_QUP_CORE_0] = &slv_qup_core_0 }; -static struct qcom_icc_desc qcm2290_qup_virt = { +static const struct qcom_icc_desc qcm2290_qup_virt = { .type = QCOM_ICC_QNOC, .nodes = qcm2290_qup_virt_nodes, 
.num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes), }; -static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = { +static struct qcom_icc_node * const qcm2290_mmnrt_virt_nodes[] = { [MASTER_CAMNOC_SF] = &mas_camnoc_sf, [MASTER_VIDEO_P0] = &mas_video_p0, [MASTER_VIDEO_PROC] = &mas_video_proc, [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt, }; -static struct qcom_icc_desc qcm2290_mmnrt_virt = { +static const struct qcom_icc_desc qcm2290_mmnrt_virt = { .type = QCOM_ICC_QNOC, .nodes = qcm2290_mmnrt_virt_nodes, .num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes), @@ -1324,13 +1324,13 @@ static struct qcom_icc_desc qcm2290_mmnrt_virt = { .qos_offset = 0x15000, }; -static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = { +static struct qcom_icc_node * const qcm2290_mmrt_virt_nodes[] = { [MASTER_CAMNOC_HF] = &mas_camnoc_hf, [MASTER_MDP0] = &mas_mdp0, [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt, }; -static struct qcom_icc_desc qcm2290_mmrt_virt = { +static const struct qcom_icc_desc qcm2290_mmrt_virt = { .type = QCOM_ICC_QNOC, .nodes = qcm2290_mmrt_virt_nodes, .num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes), diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index 416c8bff8efa..fae155344332 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -974,7 +974,7 @@ static struct qcom_icc_node slv_lpass = { .slv_rpm_id = -1, }; -static struct qcom_icc_node *qcs404_bimc_nodes[] = { +static struct qcom_icc_node * const qcs404_bimc_nodes[] = { [MASTER_AMPSS_M0] = &mas_apps_proc, [MASTER_OXILI] = &mas_oxili, [MASTER_MDP_PORT0] = &mas_mdp, @@ -984,12 +984,12 @@ static struct qcom_icc_node *qcs404_bimc_nodes[] = { [SLAVE_BIMC_SNOC] = &slv_bimc_snoc, }; -static struct qcom_icc_desc qcs404_bimc = { +static const struct qcom_icc_desc qcs404_bimc = { .nodes = qcs404_bimc_nodes, .num_nodes = ARRAY_SIZE(qcs404_bimc_nodes), }; -static struct qcom_icc_node *qcs404_pcnoc_nodes[] = { +static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = { [MASTER_SPDM] = &mas_spdm, [MASTER_BLSP_1] = &mas_blsp_1, [MASTER_BLSP_2] = &mas_blsp_2, @@ -1038,12 +1038,12 @@ static struct qcom_icc_node *qcs404_pcnoc_nodes[] = { [SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc, }; -static struct qcom_icc_desc qcs404_pcnoc = { +static const struct qcom_icc_desc qcs404_pcnoc = { .nodes = qcs404_pcnoc_nodes, .num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes), }; -static struct qcom_icc_node *qcs404_snoc_nodes[] = { +static struct qcom_icc_node * const qcs404_snoc_nodes[] = { [MASTER_QDSS_BAM] = &mas_qdss_bam, [MASTER_BIMC_SNOC] = &mas_bimc_snoc, [MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc, @@ -1066,7 +1066,7 @@ static struct qcom_icc_node *qcs404_snoc_nodes[] = { [SLAVE_LPASS] = &slv_lpass, }; -static struct qcom_icc_desc qcs404_snoc = { +static const struct qcom_icc_desc qcs404_snoc = { .nodes = qcs404_snoc_nodes, .num_nodes = ARRAY_SIZE(qcs404_snoc_nodes), }; diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c index 5f7c0f85fa8e..35cd448efdfb 100644 --- a/drivers/interconnect/qcom/sc7180.c +++ b/drivers/interconnect/qcom/sc7180.c @@ -178,11 +178,11 @@ DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc); DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc); DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc); -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_cn1, }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_A1NOC_CFG] = 
&qhm_a1noc_cfg, [MASTER_QSPI] = &qhm_qspi, [MASTER_QUP_0] = &qhm_qup_0, @@ -193,18 +193,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, }; -static struct qcom_icc_desc sc7180_aggre1_noc = { +static const struct qcom_icc_desc sc7180_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg, [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_QUP_1] = &qhm_qup_1, @@ -216,56 +216,56 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -static struct qcom_icc_desc sc7180_aggre2_noc = { +static const struct qcom_icc_desc sc7180_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *camnoc_virt_bcms[] = { +static struct qcom_icc_bcm * const camnoc_virt_bcms[] = { &bcm_mm1, }; -static struct qcom_icc_node *camnoc_virt_nodes[] = { +static struct qcom_icc_node * const camnoc_virt_nodes[] = { [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp, [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp, [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp, [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp, }; -static struct qcom_icc_desc sc7180_camnoc_virt = { +static const struct qcom_icc_desc sc7180_camnoc_virt = { .nodes = camnoc_virt_nodes, .num_nodes = ARRAY_SIZE(camnoc_virt_nodes), .bcms = camnoc_virt_bcms, .num_bcms = ARRAY_SIZE(camnoc_virt_bcms), }; -static struct qcom_icc_bcm *compute_noc_bcms[] = { +static struct qcom_icc_bcm * const compute_noc_bcms[] = { &bcm_co0, &bcm_co2, &bcm_co3, }; -static struct qcom_icc_node *compute_noc_nodes[] = { +static struct qcom_icc_node * const compute_noc_nodes[] = { [MASTER_NPU] = &qnm_npu, [MASTER_NPU_PROC] = &qxm_npu_dsp, [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc, }; -static struct qcom_icc_desc sc7180_compute_noc = { +static const struct qcom_icc_desc sc7180_compute_noc = { .nodes = compute_noc_nodes, .num_nodes = ARRAY_SIZE(compute_noc_nodes), .bcms = compute_noc_bcms, .num_bcms = ARRAY_SIZE(compute_noc_bcms), }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, &bcm_cn1, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [MASTER_SNOC_CNOC] = &qnm_snoc, [MASTER_QDSS_DAP] = &xm_qdss_dap, [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg, @@ -321,32 +321,32 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_SERVICE_CNOC] = &srvc_cnoc, }; -static struct qcom_icc_desc sc7180_config_noc = { +static const struct qcom_icc_desc sc7180_config_noc = { .nodes = config_noc_nodes, .num_nodes = ARRAY_SIZE(config_noc_nodes), .bcms = config_noc_bcms, .num_bcms = ARRAY_SIZE(config_noc_bcms), }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc, [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc, [SLAVE_LLCC_CFG] = &qhs_llcc, }; -static struct qcom_icc_desc sc7180_dc_noc = { +static const struct qcom_icc_desc sc7180_dc_noc = { .nodes = dc_noc_nodes, .num_nodes = ARRAY_SIZE(dc_noc_nodes), }; -static struct 
qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh2, &bcm_sh3, &bcm_sh4, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_APPSS_PROC] = &acm_apps0, [MASTER_SYS_TCU] = &acm_sys_tcu, [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg, @@ -362,7 +362,7 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc, }; -static struct qcom_icc_desc sc7180_gem_noc = { +static const struct qcom_icc_desc sc7180_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, @@ -374,25 +374,25 @@ static struct qcom_icc_bcm *mc_virt_bcms[] = { &bcm_mc0, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI1] = &ebi, }; -static struct qcom_icc_desc sc7180_mc_virt = { +static const struct qcom_icc_desc sc7180_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm2, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg, [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0, [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1, @@ -406,14 +406,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_SERVICE_MNOC] = &srvc_mnoc, }; -static struct qcom_icc_desc sc7180_mmss_noc = { +static const struct qcom_icc_desc sc7180_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, .num_bcms = ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_node *npu_noc_nodes[] = { +static struct qcom_icc_node * const npu_noc_nodes[] = { [MASTER_NPU_SYS] = &amm_npu_sys, [MASTER_NPU_NOC_CFG] = &qhm_npu_cfg, [SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0, @@ -427,30 +427,30 @@ static struct qcom_icc_node *npu_noc_nodes[] = { [SLAVE_SERVICE_NPU_NOC] = &srvc_noc, }; -static struct qcom_icc_desc sc7180_npu_noc = { +static const struct qcom_icc_desc sc7180_npu_noc = { .nodes = npu_noc_nodes, .num_nodes = ARRAY_SIZE(npu_noc_nodes), }; -static struct qcom_icc_bcm *qup_virt_bcms[] = { +static struct qcom_icc_bcm * const qup_virt_bcms[] = { &bcm_qup0, }; -static struct qcom_icc_node *qup_virt_nodes[] = { +static struct qcom_icc_node * const qup_virt_nodes[] = { [MASTER_QUP_CORE_0] = &qup_core_master_1, [MASTER_QUP_CORE_1] = &qup_core_master_2, [SLAVE_QUP_CORE_0] = &qup_core_slave_1, [SLAVE_QUP_CORE_1] = &qup_core_slave_2, }; -static struct qcom_icc_desc sc7180_qup_virt = { +static const struct qcom_icc_desc sc7180_qup_virt = { .nodes = qup_virt_nodes, .num_nodes = ARRAY_SIZE(qup_virt_nodes), .bcms = qup_virt_bcms, .num_bcms = ARRAY_SIZE(qup_virt_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn1, &bcm_sn2, @@ -461,7 +461,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn12, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_SNOC_CFG] = &qhm_snoc_cfg, [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, @@ -478,7 +478,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -static struct qcom_icc_desc sc7180_system_noc = { 
+static const struct qcom_icc_desc sc7180_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c index f8b34f6cbb0d..971f538bc98a 100644 --- a/drivers/interconnect/qcom/sc7280.c +++ b/drivers/interconnect/qcom/sc7280.c @@ -1476,13 +1476,13 @@ static struct qcom_icc_bcm bcm_sn14 = { .nodes = { &qns_pcie_mem_noc }, }; -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_sn5, &bcm_sn6, &bcm_sn14, }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_QSPI_0] = &qhm_qspi, [MASTER_QUP_0] = &qhm_qup0, [MASTER_QUP_1] = &qhm_qup1, @@ -1500,18 +1500,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, }; -static struct qcom_icc_desc sc7280_aggre1_noc = { +static const struct qcom_icc_desc sc7280_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg, [MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath, @@ -1522,38 +1522,38 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -static struct qcom_icc_desc sc7280_aggre2_noc = { +static const struct qcom_icc_desc sc7280_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *clk_virt_bcms[] = { +static struct qcom_icc_bcm * const clk_virt_bcms[] = { &bcm_qup0, &bcm_qup1, }; -static struct qcom_icc_node *clk_virt_nodes[] = { +static struct qcom_icc_node * const clk_virt_nodes[] = { [MASTER_QUP_CORE_0] = &qup0_core_master, [MASTER_QUP_CORE_1] = &qup1_core_master, [SLAVE_QUP_CORE_0] = &qup0_core_slave, [SLAVE_QUP_CORE_1] = &qup1_core_slave, }; -static struct qcom_icc_desc sc7280_clk_virt = { +static const struct qcom_icc_desc sc7280_clk_virt = { .nodes = clk_virt_nodes, .num_nodes = ARRAY_SIZE(clk_virt_nodes), .bcms = clk_virt_bcms, .num_bcms = ARRAY_SIZE(clk_virt_bcms), }; -static struct qcom_icc_bcm *cnoc2_bcms[] = { +static struct qcom_icc_bcm * const cnoc2_bcms[] = { &bcm_cn1, &bcm_cn2, }; -static struct qcom_icc_node *cnoc2_nodes[] = { +static struct qcom_icc_node * const cnoc2_nodes[] = { [MASTER_CNOC3_CNOC2] = &qnm_cnoc3_cnoc2, [MASTER_QDSS_DAP] = &xm_qdss_dap, [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0, @@ -1603,21 +1603,21 @@ static struct qcom_icc_node *cnoc2_nodes[] = { [SLAVE_SNOC_CFG] = &qns_snoc_cfg, }; -static struct qcom_icc_desc sc7280_cnoc2 = { +static const struct qcom_icc_desc sc7280_cnoc2 = { .nodes = cnoc2_nodes, .num_nodes = ARRAY_SIZE(cnoc2_nodes), .bcms = cnoc2_bcms, .num_bcms = ARRAY_SIZE(cnoc2_bcms), }; -static struct qcom_icc_bcm *cnoc3_bcms[] = { +static struct qcom_icc_bcm * const cnoc3_bcms[] = { &bcm_cn0, &bcm_cn1, &bcm_sn3, &bcm_sn4, }; -static struct qcom_icc_node *cnoc3_nodes[] = { +static struct qcom_icc_node * const cnoc3_nodes[] = { [MASTER_CNOC2_CNOC3] = &qnm_cnoc2_cnoc3, [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, [MASTER_GEM_NOC_PCIE_SNOC] = 
&qnm_gemnoc_pcie, @@ -1635,37 +1635,37 @@ static struct qcom_icc_node *cnoc3_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -static struct qcom_icc_desc sc7280_cnoc3 = { +static const struct qcom_icc_desc sc7280_cnoc3 = { .nodes = cnoc3_nodes, .num_nodes = ARRAY_SIZE(cnoc3_nodes), .bcms = cnoc3_bcms, .num_bcms = ARRAY_SIZE(cnoc3_bcms), }; -static struct qcom_icc_bcm *dc_noc_bcms[] = { +static struct qcom_icc_bcm * const dc_noc_bcms[] = { }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc, [SLAVE_LLCC_CFG] = &qhs_llcc, [SLAVE_GEM_NOC_CFG] = &qns_gemnoc, }; -static struct qcom_icc_desc sc7280_dc_noc = { +static const struct qcom_icc_desc sc7280_dc_noc = { .nodes = dc_noc_nodes, .num_nodes = ARRAY_SIZE(dc_noc_nodes), .bcms = dc_noc_bcms, .num_bcms = ARRAY_SIZE(dc_noc_bcms), }; -static struct qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh2, &bcm_sh3, &bcm_sh4, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_GPU_TCU] = &alm_gpu_tcu, [MASTER_SYS_TCU] = &alm_sys_tcu, [MASTER_APPSS_PROC] = &chm_apps, @@ -1687,17 +1687,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc, }; -static struct qcom_icc_desc sc7280_gem_noc = { +static const struct qcom_icc_desc sc7280_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = { +static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = { }; -static struct qcom_icc_node *lpass_ag_noc_nodes[] = { +static struct qcom_icc_node * const lpass_ag_noc_nodes[] = { [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc, [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core, [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi, @@ -1707,38 +1707,38 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = { [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc, }; -static struct qcom_icc_desc sc7280_lpass_ag_noc = { +static const struct qcom_icc_desc sc7280_lpass_ag_noc = { .nodes = lpass_ag_noc_nodes, .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), .bcms = lpass_ag_noc_bcms, .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI1] = &ebi, }; -static struct qcom_icc_desc sc7280_mc_virt = { +static const struct qcom_icc_desc sc7280_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm4, &bcm_mm5, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg, [MASTER_VIDEO_P0] = &qnm_video0, [MASTER_VIDEO_PROC] = &qnm_video_cpu, @@ -1751,40 +1751,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_SERVICE_MNOC] = &srvc_mnoc, }; -static struct qcom_icc_desc sc7280_mmss_noc = { +static const struct qcom_icc_desc sc7280_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, .num_bcms = 
ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_bcm *nsp_noc_bcms[] = { +static struct qcom_icc_bcm * const nsp_noc_bcms[] = { &bcm_co0, &bcm_co3, }; -static struct qcom_icc_node *nsp_noc_nodes[] = { +static struct qcom_icc_node * const nsp_noc_nodes[] = { [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config, [MASTER_CDSP_PROC] = &qxm_nsp, [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc, }; -static struct qcom_icc_desc sc7280_nsp_noc = { +static const struct qcom_icc_desc sc7280_nsp_noc = { .nodes = nsp_noc_nodes, .num_nodes = ARRAY_SIZE(nsp_noc_nodes), .bcms = nsp_noc_bcms, .num_bcms = ARRAY_SIZE(nsp_noc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn2, &bcm_sn7, &bcm_sn8, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, [MASTER_SNOC_CFG] = &qnm_snoc_cfg, @@ -1795,7 +1795,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_SERVICE_SNOC] = &srvc_snoc, }; -static struct qcom_icc_desc sc7280_system_noc = { +static const struct qcom_icc_desc sc7280_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c index e9adf05b9330..8e32ca958824 100644 --- a/drivers/interconnect/qcom/sc8180x.c +++ b/drivers/interconnect/qcom/sc8180x.c @@ -15,229 +15,1611 @@ #include "icc-rpmh.h" #include "sc8180x.h" -DEFINE_QNODE(mas_qhm_a1noc_cfg, SC8180X_MASTER_A1NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A1NOC); -DEFINE_QNODE(mas_xm_ufs_card, SC8180X_MASTER_UFS_CARD, 1, 8, SC8180X_A1NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_ufs_g4, SC8180X_MASTER_UFS_GEN4, 1, 8, SC8180X_A1NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_ufs_mem, SC8180X_MASTER_UFS_MEM, 1, 8, SC8180X_A1NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_usb3_0, SC8180X_MASTER_USB3, 1, 8, SC8180X_A1NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_usb3_1, SC8180X_MASTER_USB3_1, 1, 8, SC8180X_A1NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_usb3_2, SC8180X_MASTER_USB3_2, 1, 16, SC8180X_A1NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_a2noc_cfg, SC8180X_MASTER_A2NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_A2NOC); -DEFINE_QNODE(mas_qhm_qdss_bam, SC8180X_MASTER_QDSS_BAM, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_qspi, SC8180X_MASTER_QSPI_0, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_qspi1, SC8180X_MASTER_QSPI_1, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_qup0, SC8180X_MASTER_QUP_0, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_qup1, SC8180X_MASTER_QUP_1, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_qup2, SC8180X_MASTER_QUP_2, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qhm_sensorss_ahb, SC8180X_MASTER_SENSORS_AHB, 1, 4, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qxm_crypto, SC8180X_MASTER_CRYPTO_CORE_0, 1, 8, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qxm_ipa, SC8180X_MASTER_IPA, 1, 8, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_emac, SC8180X_MASTER_EMAC, 1, 8, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_pcie3_0, SC8180X_MASTER_PCIE, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); -DEFINE_QNODE(mas_xm_pcie3_1, SC8180X_MASTER_PCIE_1, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); -DEFINE_QNODE(mas_xm_pcie3_2, SC8180X_MASTER_PCIE_2, 1, 8, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); -DEFINE_QNODE(mas_xm_pcie3_3, SC8180X_MASTER_PCIE_3, 1, 16, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC); 
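This long run of removed DEFINE_QNODE() lines (it continues through the rest of the sc8180x hunk) is the flip side of the hunk header above growing from 229 to 1611 lines: the macro uses are being expanded into open-coded struct qcom_icc_node initializers. Roughly what one such replacement looks like, sketched from the first removed entry; the field layout is assumed from the macro's definition in icc-rpmh.h, not copied from the patch:

static struct qcom_icc_node mas_qhm_a1noc_cfg = {
	.name = "mas_qhm_a1noc_cfg",
	.id = SC8180X_MASTER_A1NOC_CFG,
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	.links = { SC8180X_SLAVE_SERVICE_A1NOC },
};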
-DEFINE_QNODE(mas_xm_qdss_etr, SC8180X_MASTER_QDSS_ETR, 1, 8, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_sdc2, SC8180X_MASTER_SDCC_2, 1, 8, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_xm_sdc4, SC8180X_MASTER_SDCC_4, 1, 8, SC8180X_A2NOC_SNOC_SLV); -DEFINE_QNODE(mas_qxm_camnoc_hf0_uncomp, SC8180X_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP); -DEFINE_QNODE(mas_qxm_camnoc_hf1_uncomp, SC8180X_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP); -DEFINE_QNODE(mas_qxm_camnoc_sf_uncomp, SC8180X_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC8180X_SLAVE_CAMNOC_UNCOMP); -DEFINE_QNODE(mas_qnm_npu, SC8180X_MASTER_NPU, 1, 32, SC8180X_SLAVE_CDSP_MEM_NOC); -DEFINE_QNODE(mas_qnm_snoc, SC8180X_SNOC_CNOC_MAS, 1, 8, SC8180X_SLAVE_TLMM_SOUTH, SC8180X_SLAVE_CDSP_CFG, SC8180X_SLAVE_SPSS_CFG, SC8180X_SLAVE_CAMERA_CFG, SC8180X_SLAVE_SDCC_4, SC8180X_SLAVE_AHB2PHY_CENTER, SC8180X_SLAVE_SDCC_2, SC8180X_SLAVE_PCIE_2_CFG, SC8180X_SLAVE_CNOC_MNOC_CFG, SC8180X_SLAVE_EMAC_CFG, SC8180X_SLAVE_QSPI_0, SC8180X_SLAVE_QSPI_1, SC8180X_SLAVE_TLMM_EAST, SC8180X_SLAVE_SNOC_CFG, SC8180X_SLAVE_AHB2PHY_EAST, SC8180X_SLAVE_GLM, SC8180X_SLAVE_PDM, SC8180X_SLAVE_PCIE_1_CFG, SC8180X_SLAVE_A2NOC_CFG, SC8180X_SLAVE_QDSS_CFG, SC8180X_SLAVE_DISPLAY_CFG, SC8180X_SLAVE_TCSR, SC8180X_SLAVE_UFS_MEM_0_CFG, SC8180X_SLAVE_CNOC_DDRSS, SC8180X_SLAVE_PCIE_0_CFG, SC8180X_SLAVE_QUP_1, SC8180X_SLAVE_QUP_2, SC8180X_SLAVE_NPU_CFG, SC8180X_SLAVE_CRYPTO_0_CFG, SC8180X_SLAVE_GRAPHICS_3D_CFG, SC8180X_SLAVE_VENUS_CFG, SC8180X_SLAVE_TSIF, SC8180X_SLAVE_IPA_CFG, SC8180X_SLAVE_CLK_CTL, SC8180X_SLAVE_SECURITY, SC8180X_SLAVE_AOP, SC8180X_SLAVE_AHB2PHY_WEST, SC8180X_SLAVE_AHB2PHY_SOUTH, SC8180X_SLAVE_SERVICE_CNOC, SC8180X_SLAVE_UFS_CARD_CFG, SC8180X_SLAVE_USB3_1, SC8180X_SLAVE_USB3_2, SC8180X_SLAVE_PCIE_3_CFG, SC8180X_SLAVE_RBCPR_CX_CFG, SC8180X_SLAVE_TLMM_WEST, SC8180X_SLAVE_A1NOC_CFG, SC8180X_SLAVE_AOSS, SC8180X_SLAVE_PRNG, SC8180X_SLAVE_VSENSE_CTRL_CFG, SC8180X_SLAVE_QUP_0, SC8180X_SLAVE_USB3, SC8180X_SLAVE_RBCPR_MMCX_CFG, SC8180X_SLAVE_PIMEM_CFG, SC8180X_SLAVE_UFS_MEM_1_CFG, SC8180X_SLAVE_RBCPR_MX_CFG, SC8180X_SLAVE_IMEM_CFG); -DEFINE_QNODE(mas_qhm_cnoc_dc_noc, SC8180X_MASTER_CNOC_DC_NOC, 1, 4, SC8180X_SLAVE_LLCC_CFG, SC8180X_SLAVE_GEM_NOC_CFG); -DEFINE_QNODE(mas_acm_apps, SC8180X_MASTER_AMPSS_M0, 4, 64, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_acm_gpu_tcu, SC8180X_MASTER_GPU_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_acm_sys_tcu, SC8180X_MASTER_SYS_TCU, 1, 8, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_qhm_gemnoc_cfg, SC8180X_MASTER_GEM_NOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_GEM_NOC_1, SC8180X_SLAVE_SERVICE_GEM_NOC, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG); -DEFINE_QNODE(mas_qnm_cmpnoc, SC8180X_MASTER_COMPUTE_NOC, 2, 32, SC8180X_SLAVE_ECC, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_qnm_gpu, SC8180X_MASTER_GRAPHICS_3D, 4, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_qnm_mnoc_hf, SC8180X_MASTER_MNOC_HF_MEM_NOC, 2, 32, SC8180X_SLAVE_LLCC); -DEFINE_QNODE(mas_qnm_mnoc_sf, SC8180X_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_qnm_pcie, SC8180X_MASTER_GEM_NOC_PCIE_SNOC, 1, 32, SC8180X_SLAVE_LLCC, SC8180X_SLAVE_GEM_NOC_SNOC); -DEFINE_QNODE(mas_qnm_snoc_gc, SC8180X_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC8180X_SLAVE_LLCC); -DEFINE_QNODE(mas_qnm_snoc_sf, SC8180X_MASTER_SNOC_SF_MEM_NOC, 1, 32, SC8180X_SLAVE_LLCC); -DEFINE_QNODE(mas_qxm_ecc, SC8180X_MASTER_ECC, 2, 32, 
SC8180X_SLAVE_LLCC); -DEFINE_QNODE(mas_ipa_core_master, SC8180X_MASTER_IPA_CORE, 1, 8, SC8180X_SLAVE_IPA_CORE); -DEFINE_QNODE(mas_llcc_mc, SC8180X_MASTER_LLCC, 8, 4, SC8180X_SLAVE_EBI_CH0); -DEFINE_QNODE(mas_qhm_mnoc_cfg, SC8180X_MASTER_CNOC_MNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_MNOC); -DEFINE_QNODE(mas_qxm_camnoc_hf0, SC8180X_MASTER_CAMNOC_HF0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); -DEFINE_QNODE(mas_qxm_camnoc_hf1, SC8180X_MASTER_CAMNOC_HF1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); -DEFINE_QNODE(mas_qxm_camnoc_sf, SC8180X_MASTER_CAMNOC_SF, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); -DEFINE_QNODE(mas_qxm_mdp0, SC8180X_MASTER_MDP_PORT0, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); -DEFINE_QNODE(mas_qxm_mdp1, SC8180X_MASTER_MDP_PORT1, 1, 32, SC8180X_SLAVE_MNOC_HF_MEM_NOC); -DEFINE_QNODE(mas_qxm_rot, SC8180X_MASTER_ROTATOR, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); -DEFINE_QNODE(mas_qxm_venus0, SC8180X_MASTER_VIDEO_P0, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); -DEFINE_QNODE(mas_qxm_venus1, SC8180X_MASTER_VIDEO_P1, 1, 32, SC8180X_SLAVE_MNOC_SF_MEM_NOC); -DEFINE_QNODE(mas_qxm_venus_arm9, SC8180X_MASTER_VIDEO_PROC, 1, 8, SC8180X_SLAVE_MNOC_SF_MEM_NOC); -DEFINE_QNODE(mas_qhm_snoc_cfg, SC8180X_MASTER_SNOC_CFG, 1, 4, SC8180X_SLAVE_SERVICE_SNOC); -DEFINE_QNODE(mas_qnm_aggre1_noc, SC8180X_A1NOC_SNOC_MAS, 1, 32, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_QDSS_STM); -DEFINE_QNODE(mas_qnm_aggre2_noc, SC8180X_A2NOC_SNOC_MAS, 1, 16, SC8180X_SLAVE_SNOC_GEM_NOC_SF, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_PCIE_3, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SLAVE_PCIE_2, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_PCIE_0, SC8180X_SLAVE_PCIE_1, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM); -DEFINE_QNODE(mas_qnm_gemnoc, SC8180X_MASTER_GEM_NOC_SNOC, 1, 8, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM); -DEFINE_QNODE(mas_qxm_pimem, SC8180X_MASTER_PIMEM, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM); -DEFINE_QNODE(mas_xm_gic, SC8180X_MASTER_GIC, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM); -DEFINE_QNODE(slv_qns_a1noc_snoc, SC8180X_A1NOC_SNOC_SLV, 1, 32, SC8180X_A1NOC_SNOC_MAS); -DEFINE_QNODE(slv_srvc_aggre1_noc, SC8180X_SLAVE_SERVICE_A1NOC, 1, 4); -DEFINE_QNODE(slv_qns_a2noc_snoc, SC8180X_A2NOC_SNOC_SLV, 1, 16, SC8180X_A2NOC_SNOC_MAS); -DEFINE_QNODE(slv_qns_pcie_mem_noc, SC8180X_SLAVE_ANOC_PCIE_GEM_NOC, 1, 32, SC8180X_MASTER_GEM_NOC_PCIE_SNOC); -DEFINE_QNODE(slv_srvc_aggre2_noc, SC8180X_SLAVE_SERVICE_A2NOC, 1, 4); -DEFINE_QNODE(slv_qns_camnoc_uncomp, SC8180X_SLAVE_CAMNOC_UNCOMP, 1, 32); -DEFINE_QNODE(slv_qns_cdsp_mem_noc, SC8180X_SLAVE_CDSP_MEM_NOC, 2, 32, SC8180X_MASTER_COMPUTE_NOC); -DEFINE_QNODE(slv_qhs_a1_noc_cfg, SC8180X_SLAVE_A1NOC_CFG, 1, 4, SC8180X_MASTER_A1NOC_CFG); -DEFINE_QNODE(slv_qhs_a2_noc_cfg, SC8180X_SLAVE_A2NOC_CFG, 1, 4, SC8180X_MASTER_A2NOC_CFG); -DEFINE_QNODE(slv_qhs_ahb2phy_refgen_center, SC8180X_SLAVE_AHB2PHY_CENTER, 1, 4); -DEFINE_QNODE(slv_qhs_ahb2phy_refgen_east, SC8180X_SLAVE_AHB2PHY_EAST, 1, 4); -DEFINE_QNODE(slv_qhs_ahb2phy_refgen_west, SC8180X_SLAVE_AHB2PHY_WEST, 1, 4); -DEFINE_QNODE(slv_qhs_ahb2phy_south, SC8180X_SLAVE_AHB2PHY_SOUTH, 1, 4); -DEFINE_QNODE(slv_qhs_aop, SC8180X_SLAVE_AOP, 1, 4); -DEFINE_QNODE(slv_qhs_aoss, SC8180X_SLAVE_AOSS, 1, 4); -DEFINE_QNODE(slv_qhs_camera_cfg, SC8180X_SLAVE_CAMERA_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_clk_ctl, SC8180X_SLAVE_CLK_CTL, 1, 4); -DEFINE_QNODE(slv_qhs_compute_dsp, 
SC8180X_SLAVE_CDSP_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_cpr_cx, SC8180X_SLAVE_RBCPR_CX_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_cpr_mmcx, SC8180X_SLAVE_RBCPR_MMCX_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_cpr_mx, SC8180X_SLAVE_RBCPR_MX_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_crypto0_cfg, SC8180X_SLAVE_CRYPTO_0_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_ddrss_cfg, SC8180X_SLAVE_CNOC_DDRSS, 1, 4, SC8180X_MASTER_CNOC_DC_NOC); -DEFINE_QNODE(slv_qhs_display_cfg, SC8180X_SLAVE_DISPLAY_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_emac_cfg, SC8180X_SLAVE_EMAC_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_glm, SC8180X_SLAVE_GLM, 1, 4); -DEFINE_QNODE(slv_qhs_gpuss_cfg, SC8180X_SLAVE_GRAPHICS_3D_CFG, 1, 8); -DEFINE_QNODE(slv_qhs_imem_cfg, SC8180X_SLAVE_IMEM_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_ipa, SC8180X_SLAVE_IPA_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_mnoc_cfg, SC8180X_SLAVE_CNOC_MNOC_CFG, 1, 4, SC8180X_MASTER_CNOC_MNOC_CFG); -DEFINE_QNODE(slv_qhs_npu_cfg, SC8180X_SLAVE_NPU_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_pcie0_cfg, SC8180X_SLAVE_PCIE_0_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_pcie1_cfg, SC8180X_SLAVE_PCIE_1_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_pcie2_cfg, SC8180X_SLAVE_PCIE_2_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_pcie3_cfg, SC8180X_SLAVE_PCIE_3_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_pdm, SC8180X_SLAVE_PDM, 1, 4); -DEFINE_QNODE(slv_qhs_pimem_cfg, SC8180X_SLAVE_PIMEM_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_prng, SC8180X_SLAVE_PRNG, 1, 4); -DEFINE_QNODE(slv_qhs_qdss_cfg, SC8180X_SLAVE_QDSS_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_qspi_0, SC8180X_SLAVE_QSPI_0, 1, 4); -DEFINE_QNODE(slv_qhs_qspi_1, SC8180X_SLAVE_QSPI_1, 1, 4); -DEFINE_QNODE(slv_qhs_qupv3_east0, SC8180X_SLAVE_QUP_1, 1, 4); -DEFINE_QNODE(slv_qhs_qupv3_east1, SC8180X_SLAVE_QUP_2, 1, 4); -DEFINE_QNODE(slv_qhs_qupv3_west, SC8180X_SLAVE_QUP_0, 1, 4); -DEFINE_QNODE(slv_qhs_sdc2, SC8180X_SLAVE_SDCC_2, 1, 4); -DEFINE_QNODE(slv_qhs_sdc4, SC8180X_SLAVE_SDCC_4, 1, 4); -DEFINE_QNODE(slv_qhs_security, SC8180X_SLAVE_SECURITY, 1, 4); -DEFINE_QNODE(slv_qhs_snoc_cfg, SC8180X_SLAVE_SNOC_CFG, 1, 4, SC8180X_MASTER_SNOC_CFG); -DEFINE_QNODE(slv_qhs_spss_cfg, SC8180X_SLAVE_SPSS_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_tcsr, SC8180X_SLAVE_TCSR, 1, 4); -DEFINE_QNODE(slv_qhs_tlmm_east, SC8180X_SLAVE_TLMM_EAST, 1, 4); -DEFINE_QNODE(slv_qhs_tlmm_south, SC8180X_SLAVE_TLMM_SOUTH, 1, 4); -DEFINE_QNODE(slv_qhs_tlmm_west, SC8180X_SLAVE_TLMM_WEST, 1, 4); -DEFINE_QNODE(slv_qhs_tsif, SC8180X_SLAVE_TSIF, 1, 4); -DEFINE_QNODE(slv_qhs_ufs_card_cfg, SC8180X_SLAVE_UFS_CARD_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_ufs_mem0_cfg, SC8180X_SLAVE_UFS_MEM_0_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_ufs_mem1_cfg, SC8180X_SLAVE_UFS_MEM_1_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_usb3_0, SC8180X_SLAVE_USB3, 1, 4); -DEFINE_QNODE(slv_qhs_usb3_1, SC8180X_SLAVE_USB3_1, 1, 4); -DEFINE_QNODE(slv_qhs_usb3_2, SC8180X_SLAVE_USB3_2, 1, 4); -DEFINE_QNODE(slv_qhs_venus_cfg, SC8180X_SLAVE_VENUS_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_vsense_ctrl_cfg, SC8180X_SLAVE_VSENSE_CTRL_CFG, 1, 4); -DEFINE_QNODE(slv_srvc_cnoc, SC8180X_SLAVE_SERVICE_CNOC, 1, 4); -DEFINE_QNODE(slv_qhs_gemnoc, SC8180X_SLAVE_GEM_NOC_CFG, 1, 4, SC8180X_MASTER_GEM_NOC_CFG); -DEFINE_QNODE(slv_qhs_llcc, SC8180X_SLAVE_LLCC_CFG, 1, 4); -DEFINE_QNODE(slv_qhs_mdsp_ms_mpu_cfg, SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4); -DEFINE_QNODE(slv_qns_ecc, SC8180X_SLAVE_ECC, 1, 32); -DEFINE_QNODE(slv_qns_gem_noc_snoc, SC8180X_SLAVE_GEM_NOC_SNOC, 1, 8, SC8180X_MASTER_GEM_NOC_SNOC); -DEFINE_QNODE(slv_qns_llcc, SC8180X_SLAVE_LLCC, 8, 16, SC8180X_MASTER_LLCC); -DEFINE_QNODE(slv_srvc_gemnoc, SC8180X_SLAVE_SERVICE_GEM_NOC, 1, 4); -DEFINE_QNODE(slv_srvc_gemnoc1, 
SC8180X_SLAVE_SERVICE_GEM_NOC_1, 1, 4); -DEFINE_QNODE(slv_ipa_core_slave, SC8180X_SLAVE_IPA_CORE, 1, 8); -DEFINE_QNODE(slv_ebi, SC8180X_SLAVE_EBI_CH0, 8, 4); -DEFINE_QNODE(slv_qns2_mem_noc, SC8180X_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC8180X_MASTER_MNOC_SF_MEM_NOC); -DEFINE_QNODE(slv_qns_mem_noc_hf, SC8180X_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SC8180X_MASTER_MNOC_HF_MEM_NOC); -DEFINE_QNODE(slv_srvc_mnoc, SC8180X_SLAVE_SERVICE_MNOC, 1, 4); -DEFINE_QNODE(slv_qhs_apss, SC8180X_SLAVE_APPSS, 1, 8); -DEFINE_QNODE(slv_qns_cnoc, SC8180X_SNOC_CNOC_SLV, 1, 8, SC8180X_SNOC_CNOC_MAS); -DEFINE_QNODE(slv_qns_gemnoc_gc, SC8180X_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC8180X_MASTER_SNOC_GC_MEM_NOC); -DEFINE_QNODE(slv_qns_gemnoc_sf, SC8180X_SLAVE_SNOC_GEM_NOC_SF, 1, 32, SC8180X_MASTER_SNOC_SF_MEM_NOC); -DEFINE_QNODE(slv_qxs_imem, SC8180X_SLAVE_OCIMEM, 1, 8); -DEFINE_QNODE(slv_qxs_pimem, SC8180X_SLAVE_PIMEM, 1, 8); -DEFINE_QNODE(slv_srvc_snoc, SC8180X_SLAVE_SERVICE_SNOC, 1, 4); -DEFINE_QNODE(slv_xs_pcie_0, SC8180X_SLAVE_PCIE_0, 1, 8); -DEFINE_QNODE(slv_xs_pcie_1, SC8180X_SLAVE_PCIE_1, 1, 8); -DEFINE_QNODE(slv_xs_pcie_2, SC8180X_SLAVE_PCIE_2, 1, 8); -DEFINE_QNODE(slv_xs_pcie_3, SC8180X_SLAVE_PCIE_3, 1, 8); -DEFINE_QNODE(slv_xs_qdss_stm, SC8180X_SLAVE_QDSS_STM, 1, 4); -DEFINE_QNODE(slv_xs_sys_tcu_cfg, SC8180X_SLAVE_TCU, 1, 8); - -DEFINE_QBCM(bcm_acv, "ACV", false, &slv_ebi); -DEFINE_QBCM(bcm_mc0, "MC0", false, &slv_ebi); -DEFINE_QBCM(bcm_sh0, "SH0", false, &slv_qns_llcc); -DEFINE_QBCM(bcm_mm0, "MM0", false, &slv_qns_mem_noc_hf); -DEFINE_QBCM(bcm_co0, "CO0", false, &slv_qns_cdsp_mem_noc); -DEFINE_QBCM(bcm_ce0, "CE0", false, &mas_qxm_crypto); -DEFINE_QBCM(bcm_cn0, "CN0", false, &mas_qnm_snoc, &slv_qhs_a1_noc_cfg, &slv_qhs_a2_noc_cfg, &slv_qhs_ahb2phy_refgen_center, &slv_qhs_ahb2phy_refgen_east, &slv_qhs_ahb2phy_refgen_west, &slv_qhs_ahb2phy_south, &slv_qhs_aop, &slv_qhs_aoss, &slv_qhs_camera_cfg, &slv_qhs_clk_ctl, &slv_qhs_compute_dsp, &slv_qhs_cpr_cx, &slv_qhs_cpr_mmcx, &slv_qhs_cpr_mx, &slv_qhs_crypto0_cfg, &slv_qhs_ddrss_cfg, &slv_qhs_display_cfg, &slv_qhs_emac_cfg, &slv_qhs_glm, &slv_qhs_gpuss_cfg, &slv_qhs_imem_cfg, &slv_qhs_ipa, &slv_qhs_mnoc_cfg, &slv_qhs_npu_cfg, &slv_qhs_pcie0_cfg, &slv_qhs_pcie1_cfg, &slv_qhs_pcie2_cfg, &slv_qhs_pcie3_cfg, &slv_qhs_pdm, &slv_qhs_pimem_cfg, &slv_qhs_prng, &slv_qhs_qdss_cfg, &slv_qhs_qspi_0, &slv_qhs_qspi_1, &slv_qhs_qupv3_east0, &slv_qhs_qupv3_east1, &slv_qhs_qupv3_west, &slv_qhs_sdc2, &slv_qhs_sdc4, &slv_qhs_security, &slv_qhs_snoc_cfg, &slv_qhs_spss_cfg, &slv_qhs_tcsr, &slv_qhs_tlmm_east, &slv_qhs_tlmm_south, &slv_qhs_tlmm_west, &slv_qhs_tsif, &slv_qhs_ufs_card_cfg, &slv_qhs_ufs_mem0_cfg, &slv_qhs_ufs_mem1_cfg, &slv_qhs_usb3_0, &slv_qhs_usb3_1, &slv_qhs_usb3_2, &slv_qhs_venus_cfg, &slv_qhs_vsense_ctrl_cfg, &slv_srvc_cnoc); -DEFINE_QBCM(bcm_mm1, "MM1", false, &mas_qxm_camnoc_hf0_uncomp, &mas_qxm_camnoc_hf1_uncomp, &mas_qxm_camnoc_sf_uncomp, &mas_qxm_camnoc_hf0, &mas_qxm_camnoc_hf1, &mas_qxm_mdp0, &mas_qxm_mdp1); -DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qhm_qup0, &mas_qhm_qup1, &mas_qhm_qup2); -DEFINE_QBCM(bcm_sh2, "SH2", false, &slv_qns_gem_noc_snoc); -DEFINE_QBCM(bcm_mm2, "MM2", false, &mas_qxm_camnoc_sf, &mas_qxm_rot, &mas_qxm_venus0, &mas_qxm_venus1, &mas_qxm_venus_arm9, &slv_qns2_mem_noc); -DEFINE_QBCM(bcm_sh3, "SH3", false, &mas_acm_apps); -DEFINE_QBCM(bcm_sn0, "SN0", false, &slv_qns_gemnoc_sf); -DEFINE_QBCM(bcm_sn1, "SN1", false, &slv_qxs_imem); -DEFINE_QBCM(bcm_sn2, "SN2", false, &slv_qns_gemnoc_gc); -DEFINE_QBCM(bcm_co2, "CO2", false, &mas_qnm_npu); -DEFINE_QBCM(bcm_ip0, "IP0", 
false, &slv_ipa_core_slave); -DEFINE_QBCM(bcm_sn3, "SN3", false, &slv_srvc_aggre1_noc, &slv_qns_cnoc); -DEFINE_QBCM(bcm_sn4, "SN4", false, &slv_qxs_pimem); -DEFINE_QBCM(bcm_sn8, "SN8", false, &slv_xs_pcie_0, &slv_xs_pcie_1, &slv_xs_pcie_2, &slv_xs_pcie_3); -DEFINE_QBCM(bcm_sn9, "SN9", false, &mas_qnm_aggre1_noc); -DEFINE_QBCM(bcm_sn11, "SN11", false, &mas_qnm_aggre2_noc); -DEFINE_QBCM(bcm_sn14, "SN14", false, &slv_qns_pcie_mem_noc); -DEFINE_QBCM(bcm_sn15, "SN15", false, &mas_qnm_gemnoc); - -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_node mas_qhm_a1noc_cfg = { + .name = "mas_qhm_a1noc_cfg", + .id = SC8180X_MASTER_A1NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_SERVICE_A1NOC } +}; + +static struct qcom_icc_node mas_xm_ufs_card = { + .name = "mas_xm_ufs_card", + .id = SC8180X_MASTER_UFS_CARD, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_ufs_g4 = { + .name = "mas_xm_ufs_g4", + .id = SC8180X_MASTER_UFS_GEN4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_ufs_mem = { + .name = "mas_xm_ufs_mem", + .id = SC8180X_MASTER_UFS_MEM, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_usb3_0 = { + .name = "mas_xm_usb3_0", + .id = SC8180X_MASTER_USB3, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_usb3_1 = { + .name = "mas_xm_usb3_1", + .id = SC8180X_MASTER_USB3_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_usb3_2 = { + .name = "mas_xm_usb3_2", + .id = SC8180X_MASTER_USB3_2, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_a2noc_cfg = { + .name = "mas_qhm_a2noc_cfg", + .id = SC8180X_MASTER_A2NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_SERVICE_A2NOC } +}; + +static struct qcom_icc_node mas_qhm_qdss_bam = { + .name = "mas_qhm_qdss_bam", + .id = SC8180X_MASTER_QDSS_BAM, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_qspi = { + .name = "mas_qhm_qspi", + .id = SC8180X_MASTER_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_qspi1 = { + .name = "mas_qhm_qspi1", + .id = SC8180X_MASTER_QSPI_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_qup0 = { + .name = "mas_qhm_qup0", + .id = SC8180X_MASTER_QUP_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_qup1 = { + .name = "mas_qhm_qup1", + .id = SC8180X_MASTER_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_qup2 = { + .name = "mas_qhm_qup2", + .id = SC8180X_MASTER_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qhm_sensorss_ahb = { + .name = "mas_qhm_sensorss_ahb", + .id = SC8180X_MASTER_SENSORS_AHB, + .channels = 1, + .buswidth = 
4, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qxm_crypto = { + .name = "mas_qxm_crypto", + .id = SC8180X_MASTER_CRYPTO_CORE_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qxm_ipa = { + .name = "mas_qxm_ipa", + .id = SC8180X_MASTER_IPA, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_emac = { + .name = "mas_xm_emac", + .id = SC8180X_MASTER_EMAC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_pcie3_0 = { + .name = "mas_xm_pcie3_0", + .id = SC8180X_MASTER_PCIE, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC } +}; + +static struct qcom_icc_node mas_xm_pcie3_1 = { + .name = "mas_xm_pcie3_1", + .id = SC8180X_MASTER_PCIE_1, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC } +}; + +static struct qcom_icc_node mas_xm_pcie3_2 = { + .name = "mas_xm_pcie3_2", + .id = SC8180X_MASTER_PCIE_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC } +}; + +static struct qcom_icc_node mas_xm_pcie3_3 = { + .name = "mas_xm_pcie3_3", + .id = SC8180X_MASTER_PCIE_3, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC } +}; + +static struct qcom_icc_node mas_xm_qdss_etr = { + .name = "mas_xm_qdss_etr", + .id = SC8180X_MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_sdc2 = { + .name = "mas_xm_sdc2", + .id = SC8180X_MASTER_SDCC_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_xm_sdc4 = { + .name = "mas_xm_sdc4", + .id = SC8180X_MASTER_SDCC_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_SLV } +}; + +static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp = { + .name = "mas_qxm_camnoc_hf0_uncomp", + .id = SC8180X_MASTER_CAMNOC_HF0_UNCOMP, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_CAMNOC_UNCOMP } +}; + +static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp = { + .name = "mas_qxm_camnoc_hf1_uncomp", + .id = SC8180X_MASTER_CAMNOC_HF1_UNCOMP, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_CAMNOC_UNCOMP } +}; + +static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp = { + .name = "mas_qxm_camnoc_sf_uncomp", + .id = SC8180X_MASTER_CAMNOC_SF_UNCOMP, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_CAMNOC_UNCOMP } +}; + +static struct qcom_icc_node mas_qnm_npu = { + .name = "mas_qnm_npu", + .id = SC8180X_MASTER_NPU, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_CDSP_MEM_NOC } +}; + +static struct qcom_icc_node mas_qnm_snoc = { + .name = "mas_qnm_snoc", + .id = SC8180X_SNOC_CNOC_MAS, + .channels = 1, + .buswidth = 8, + .num_links = 56, + .links = { SC8180X_SLAVE_TLMM_SOUTH, + SC8180X_SLAVE_CDSP_CFG, + SC8180X_SLAVE_SPSS_CFG, + SC8180X_SLAVE_CAMERA_CFG, + SC8180X_SLAVE_SDCC_4, + SC8180X_SLAVE_AHB2PHY_CENTER, + SC8180X_SLAVE_SDCC_2, + SC8180X_SLAVE_PCIE_2_CFG, + SC8180X_SLAVE_CNOC_MNOC_CFG, + SC8180X_SLAVE_EMAC_CFG, + SC8180X_SLAVE_QSPI_0, + SC8180X_SLAVE_QSPI_1, + SC8180X_SLAVE_TLMM_EAST, + 
SC8180X_SLAVE_SNOC_CFG, + SC8180X_SLAVE_AHB2PHY_EAST, + SC8180X_SLAVE_GLM, + SC8180X_SLAVE_PDM, + SC8180X_SLAVE_PCIE_1_CFG, + SC8180X_SLAVE_A2NOC_CFG, + SC8180X_SLAVE_QDSS_CFG, + SC8180X_SLAVE_DISPLAY_CFG, + SC8180X_SLAVE_TCSR, + SC8180X_SLAVE_UFS_MEM_0_CFG, + SC8180X_SLAVE_CNOC_DDRSS, + SC8180X_SLAVE_PCIE_0_CFG, + SC8180X_SLAVE_QUP_1, + SC8180X_SLAVE_QUP_2, + SC8180X_SLAVE_NPU_CFG, + SC8180X_SLAVE_CRYPTO_0_CFG, + SC8180X_SLAVE_GRAPHICS_3D_CFG, + SC8180X_SLAVE_VENUS_CFG, + SC8180X_SLAVE_TSIF, + SC8180X_SLAVE_IPA_CFG, + SC8180X_SLAVE_CLK_CTL, + SC8180X_SLAVE_SECURITY, + SC8180X_SLAVE_AOP, + SC8180X_SLAVE_AHB2PHY_WEST, + SC8180X_SLAVE_AHB2PHY_SOUTH, + SC8180X_SLAVE_SERVICE_CNOC, + SC8180X_SLAVE_UFS_CARD_CFG, + SC8180X_SLAVE_USB3_1, + SC8180X_SLAVE_USB3_2, + SC8180X_SLAVE_PCIE_3_CFG, + SC8180X_SLAVE_RBCPR_CX_CFG, + SC8180X_SLAVE_TLMM_WEST, + SC8180X_SLAVE_A1NOC_CFG, + SC8180X_SLAVE_AOSS, + SC8180X_SLAVE_PRNG, + SC8180X_SLAVE_VSENSE_CTRL_CFG, + SC8180X_SLAVE_QUP_0, + SC8180X_SLAVE_USB3, + SC8180X_SLAVE_RBCPR_MMCX_CFG, + SC8180X_SLAVE_PIMEM_CFG, + SC8180X_SLAVE_UFS_MEM_1_CFG, + SC8180X_SLAVE_RBCPR_MX_CFG, + SC8180X_SLAVE_IMEM_CFG } +}; + +static struct qcom_icc_node mas_qhm_cnoc_dc_noc = { + .name = "mas_qhm_cnoc_dc_noc", + .id = SC8180X_MASTER_CNOC_DC_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 2, + .links = { SC8180X_SLAVE_LLCC_CFG, + SC8180X_SLAVE_GEM_NOC_CFG } +}; + +static struct qcom_icc_node mas_acm_apps = { + .name = "mas_acm_apps", + .id = SC8180X_MASTER_AMPSS_M0, + .channels = 4, + .buswidth = 64, + .num_links = 3, + .links = { SC8180X_SLAVE_ECC, + SC8180X_SLAVE_LLCC, + SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_acm_gpu_tcu = { + .name = "mas_acm_gpu_tcu", + .id = SC8180X_MASTER_GPU_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8180X_SLAVE_LLCC, + SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_acm_sys_tcu = { + .name = "mas_acm_sys_tcu", + .id = SC8180X_MASTER_SYS_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8180X_SLAVE_LLCC, + SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_qhm_gemnoc_cfg = { + .name = "mas_qhm_gemnoc_cfg", + .id = SC8180X_MASTER_GEM_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 3, + .links = { SC8180X_SLAVE_SERVICE_GEM_NOC_1, + SC8180X_SLAVE_SERVICE_GEM_NOC, + SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG } +}; + +static struct qcom_icc_node mas_qnm_cmpnoc = { + .name = "mas_qnm_cmpnoc", + .id = SC8180X_MASTER_COMPUTE_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 3, + .links = { SC8180X_SLAVE_ECC, + SC8180X_SLAVE_LLCC, + SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_qnm_gpu = { + .name = "mas_qnm_gpu", + .id = SC8180X_MASTER_GRAPHICS_3D, + .channels = 4, + .buswidth = 32, + .num_links = 2, + .links = { SC8180X_SLAVE_LLCC, + SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_qnm_mnoc_hf = { + .name = "mas_qnm_mnoc_hf", + .id = SC8180X_MASTER_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_LLCC } +}; + +static struct qcom_icc_node mas_qnm_mnoc_sf = { + .name = "mas_qnm_mnoc_sf", + .id = SC8180X_MASTER_MNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 32, + .num_links = 2, + .links = { SC8180X_SLAVE_LLCC, + SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_qnm_pcie = { + .name = "mas_qnm_pcie", + .id = SC8180X_MASTER_GEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 32, + .num_links = 2, + .links = { SC8180X_SLAVE_LLCC, + 
SC8180X_SLAVE_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node mas_qnm_snoc_gc = { + .name = "mas_qnm_snoc_gc", + .id = SC8180X_MASTER_SNOC_GC_MEM_NOC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_SLAVE_LLCC } +}; + +static struct qcom_icc_node mas_qnm_snoc_sf = { + .name = "mas_qnm_snoc_sf", + .id = SC8180X_MASTER_SNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_LLCC } +}; + +static struct qcom_icc_node mas_qxm_ecc = { + .name = "mas_qxm_ecc", + .id = SC8180X_MASTER_ECC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_LLCC } +}; + +static struct qcom_icc_node mas_ipa_core_master = { + .name = "mas_ipa_core_master", + .id = SC8180X_MASTER_IPA_CORE, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_SLAVE_IPA_CORE } +}; + +static struct qcom_icc_node mas_llcc_mc = { + .name = "mas_llcc_mc", + .id = SC8180X_MASTER_LLCC, + .channels = 8, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_EBI_CH0 } +}; + +static struct qcom_icc_node mas_qhm_mnoc_cfg = { + .name = "mas_qhm_mnoc_cfg", + .id = SC8180X_MASTER_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_SERVICE_MNOC } +}; + +static struct qcom_icc_node mas_qxm_camnoc_hf0 = { + .name = "mas_qxm_camnoc_hf0", + .id = SC8180X_MASTER_CAMNOC_HF0, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_camnoc_hf1 = { + .name = "mas_qxm_camnoc_hf1", + .id = SC8180X_MASTER_CAMNOC_HF1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_camnoc_sf = { + .name = "mas_qxm_camnoc_sf", + .id = SC8180X_MASTER_CAMNOC_SF, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_mdp0 = { + .name = "mas_qxm_mdp0", + .id = SC8180X_MASTER_MDP_PORT0, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_mdp1 = { + .name = "mas_qxm_mdp1", + .id = SC8180X_MASTER_MDP_PORT1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_rot = { + .name = "mas_qxm_rot", + .id = SC8180X_MASTER_ROTATOR, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_venus0 = { + .name = "mas_qxm_venus0", + .id = SC8180X_MASTER_VIDEO_P0, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_venus1 = { + .name = "mas_qxm_venus1", + .id = SC8180X_MASTER_VIDEO_P1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qxm_venus_arm9 = { + .name = "mas_qxm_venus_arm9", + .id = SC8180X_MASTER_VIDEO_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node mas_qhm_snoc_cfg = { + .name = "mas_qhm_snoc_cfg", + .id = SC8180X_MASTER_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_SERVICE_SNOC } +}; + +static struct qcom_icc_node mas_qnm_aggre1_noc = { + .name = "mas_qnm_aggre1_noc", + .id = SC8180X_A1NOC_SNOC_MAS, + .channels 
= 1, + .buswidth = 32, + .num_links = 6, + .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF, + SC8180X_SLAVE_PIMEM, + SC8180X_SLAVE_OCIMEM, + SC8180X_SLAVE_APPSS, + SC8180X_SNOC_CNOC_SLV, + SC8180X_SLAVE_QDSS_STM } +}; + +static struct qcom_icc_node mas_qnm_aggre2_noc = { + .name = "mas_qnm_aggre2_noc", + .id = SC8180X_A2NOC_SNOC_MAS, + .channels = 1, + .buswidth = 16, + .num_links = 11, + .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF, + SC8180X_SLAVE_PIMEM, + SC8180X_SLAVE_PCIE_3, + SC8180X_SLAVE_OCIMEM, + SC8180X_SLAVE_APPSS, + SC8180X_SLAVE_PCIE_2, + SC8180X_SNOC_CNOC_SLV, + SC8180X_SLAVE_PCIE_0, + SC8180X_SLAVE_PCIE_1, + SC8180X_SLAVE_TCU, + SC8180X_SLAVE_QDSS_STM } +}; + +static struct qcom_icc_node mas_qnm_gemnoc = { + .name = "mas_qnm_gemnoc", + .id = SC8180X_MASTER_GEM_NOC_SNOC, + .channels = 1, + .buswidth = 8, + .num_links = 6, + .links = { SC8180X_SLAVE_PIMEM, + SC8180X_SLAVE_OCIMEM, + SC8180X_SLAVE_APPSS, + SC8180X_SNOC_CNOC_SLV, + SC8180X_SLAVE_TCU, + SC8180X_SLAVE_QDSS_STM } +}; + +static struct qcom_icc_node mas_qxm_pimem = { + .name = "mas_qxm_pimem", + .id = SC8180X_MASTER_PIMEM, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC, + SC8180X_SLAVE_OCIMEM } +}; + +static struct qcom_icc_node mas_xm_gic = { + .name = "mas_xm_gic", + .id = SC8180X_MASTER_GIC, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC, + SC8180X_SLAVE_OCIMEM } +}; + +static struct qcom_icc_node mas_qup_core_0 = { + .name = "mas_qup_core_0", + .id = SC8180X_MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_QUP_CORE_0 } +}; + +static struct qcom_icc_node mas_qup_core_1 = { + .name = "mas_qup_core_1", + .id = SC8180X_MASTER_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_QUP_CORE_1 } +}; + +static struct qcom_icc_node mas_qup_core_2 = { + .name = "mas_qup_core_2", + .id = SC8180X_MASTER_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_SLAVE_QUP_CORE_2 } +}; + +static struct qcom_icc_node slv_qns_a1noc_snoc = { + .name = "slv_qns_a1noc_snoc", + .id = SC8180X_A1NOC_SNOC_SLV, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_A1NOC_SNOC_MAS } +}; + +static struct qcom_icc_node slv_srvc_aggre1_noc = { + .name = "slv_srvc_aggre1_noc", + .id = SC8180X_SLAVE_SERVICE_A1NOC, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qns_a2noc_snoc = { + .name = "slv_qns_a2noc_snoc", + .id = SC8180X_A2NOC_SNOC_SLV, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8180X_A2NOC_SNOC_MAS } +}; + +static struct qcom_icc_node slv_qns_pcie_mem_noc = { + .name = "slv_qns_pcie_mem_noc", + .id = SC8180X_SLAVE_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_MASTER_GEM_NOC_PCIE_SNOC } +}; + +static struct qcom_icc_node slv_srvc_aggre2_noc = { + .name = "slv_srvc_aggre2_noc", + .id = SC8180X_SLAVE_SERVICE_A2NOC, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qns_camnoc_uncomp = { + .name = "slv_qns_camnoc_uncomp", + .id = SC8180X_SLAVE_CAMNOC_UNCOMP, + .channels = 1, + .buswidth = 32 +}; + +static struct qcom_icc_node slv_qns_cdsp_mem_noc = { + .name = "slv_qns_cdsp_mem_noc", + .id = SC8180X_SLAVE_CDSP_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_MASTER_COMPUTE_NOC } +}; + +static struct qcom_icc_node slv_qhs_a1_noc_cfg = { + .name = "slv_qhs_a1_noc_cfg", + .id = 
SC8180X_SLAVE_A1NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_MASTER_A1NOC_CFG } +}; + +static struct qcom_icc_node slv_qhs_a2_noc_cfg = { + .name = "slv_qhs_a2_noc_cfg", + .id = SC8180X_SLAVE_A2NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_MASTER_A2NOC_CFG } +}; + +static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center = { + .name = "slv_qhs_ahb2phy_refgen_center", + .id = SC8180X_SLAVE_AHB2PHY_CENTER, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east = { + .name = "slv_qhs_ahb2phy_refgen_east", + .id = SC8180X_SLAVE_AHB2PHY_EAST, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west = { + .name = "slv_qhs_ahb2phy_refgen_west", + .id = SC8180X_SLAVE_AHB2PHY_WEST, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ahb2phy_south = { + .name = "slv_qhs_ahb2phy_south", + .id = SC8180X_SLAVE_AHB2PHY_SOUTH, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_aop = { + .name = "slv_qhs_aop", + .id = SC8180X_SLAVE_AOP, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_aoss = { + .name = "slv_qhs_aoss", + .id = SC8180X_SLAVE_AOSS, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_camera_cfg = { + .name = "slv_qhs_camera_cfg", + .id = SC8180X_SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_clk_ctl = { + .name = "slv_qhs_clk_ctl", + .id = SC8180X_SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_compute_dsp = { + .name = "slv_qhs_compute_dsp", + .id = SC8180X_SLAVE_CDSP_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_cpr_cx = { + .name = "slv_qhs_cpr_cx", + .id = SC8180X_SLAVE_RBCPR_CX_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_cpr_mmcx = { + .name = "slv_qhs_cpr_mmcx", + .id = SC8180X_SLAVE_RBCPR_MMCX_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_cpr_mx = { + .name = "slv_qhs_cpr_mx", + .id = SC8180X_SLAVE_RBCPR_MX_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_crypto0_cfg = { + .name = "slv_qhs_crypto0_cfg", + .id = SC8180X_SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ddrss_cfg = { + .name = "slv_qhs_ddrss_cfg", + .id = SC8180X_SLAVE_CNOC_DDRSS, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_MASTER_CNOC_DC_NOC } +}; + +static struct qcom_icc_node slv_qhs_display_cfg = { + .name = "slv_qhs_display_cfg", + .id = SC8180X_SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_emac_cfg = { + .name = "slv_qhs_emac_cfg", + .id = SC8180X_SLAVE_EMAC_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_glm = { + .name = "slv_qhs_glm", + .id = SC8180X_SLAVE_GLM, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_gpuss_cfg = { + .name = "slv_qhs_gpuss_cfg", + .id = SC8180X_SLAVE_GRAPHICS_3D_CFG, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_qhs_imem_cfg = { + .name = "slv_qhs_imem_cfg", + .id = SC8180X_SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ipa = { + .name = "slv_qhs_ipa", + .id = SC8180X_SLAVE_IPA_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node 
slv_qhs_mnoc_cfg = { + .name = "slv_qhs_mnoc_cfg", + .id = SC8180X_SLAVE_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_MASTER_CNOC_MNOC_CFG } +}; + +static struct qcom_icc_node slv_qhs_npu_cfg = { + .name = "slv_qhs_npu_cfg", + .id = SC8180X_SLAVE_NPU_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_pcie0_cfg = { + .name = "slv_qhs_pcie0_cfg", + .id = SC8180X_SLAVE_PCIE_0_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_pcie1_cfg = { + .name = "slv_qhs_pcie1_cfg", + .id = SC8180X_SLAVE_PCIE_1_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_pcie2_cfg = { + .name = "slv_qhs_pcie2_cfg", + .id = SC8180X_SLAVE_PCIE_2_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_pcie3_cfg = { + .name = "slv_qhs_pcie3_cfg", + .id = SC8180X_SLAVE_PCIE_3_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_pdm = { + .name = "slv_qhs_pdm", + .id = SC8180X_SLAVE_PDM, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_pimem_cfg = { + .name = "slv_qhs_pimem_cfg", + .id = SC8180X_SLAVE_PIMEM_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_prng = { + .name = "slv_qhs_prng", + .id = SC8180X_SLAVE_PRNG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_qdss_cfg = { + .name = "slv_qhs_qdss_cfg", + .id = SC8180X_SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_qspi_0 = { + .name = "slv_qhs_qspi_0", + .id = SC8180X_SLAVE_QSPI_0, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_qspi_1 = { + .name = "slv_qhs_qspi_1", + .id = SC8180X_SLAVE_QSPI_1, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_qupv3_east0 = { + .name = "slv_qhs_qupv3_east0", + .id = SC8180X_SLAVE_QUP_1, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_qupv3_east1 = { + .name = "slv_qhs_qupv3_east1", + .id = SC8180X_SLAVE_QUP_2, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_qupv3_west = { + .name = "slv_qhs_qupv3_west", + .id = SC8180X_SLAVE_QUP_0, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_sdc2 = { + .name = "slv_qhs_sdc2", + .id = SC8180X_SLAVE_SDCC_2, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_sdc4 = { + .name = "slv_qhs_sdc4", + .id = SC8180X_SLAVE_SDCC_4, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_security = { + .name = "slv_qhs_security", + .id = SC8180X_SLAVE_SECURITY, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_snoc_cfg = { + .name = "slv_qhs_snoc_cfg", + .id = SC8180X_SLAVE_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_MASTER_SNOC_CFG } +}; + +static struct qcom_icc_node slv_qhs_spss_cfg = { + .name = "slv_qhs_spss_cfg", + .id = SC8180X_SLAVE_SPSS_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_tcsr = { + .name = "slv_qhs_tcsr", + .id = SC8180X_SLAVE_TCSR, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_tlmm_east = { + .name = "slv_qhs_tlmm_east", + .id = SC8180X_SLAVE_TLMM_EAST, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_tlmm_south = { + .name = "slv_qhs_tlmm_south", + .id = SC8180X_SLAVE_TLMM_SOUTH, + .channels = 1, + .buswidth = 4 +}; + +static struct 
qcom_icc_node slv_qhs_tlmm_west = { + .name = "slv_qhs_tlmm_west", + .id = SC8180X_SLAVE_TLMM_WEST, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_tsif = { + .name = "slv_qhs_tsif", + .id = SC8180X_SLAVE_TSIF, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ufs_card_cfg = { + .name = "slv_qhs_ufs_card_cfg", + .id = SC8180X_SLAVE_UFS_CARD_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ufs_mem0_cfg = { + .name = "slv_qhs_ufs_mem0_cfg", + .id = SC8180X_SLAVE_UFS_MEM_0_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_ufs_mem1_cfg = { + .name = "slv_qhs_ufs_mem1_cfg", + .id = SC8180X_SLAVE_UFS_MEM_1_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_usb3_0 = { + .name = "slv_qhs_usb3_0", + .id = SC8180X_SLAVE_USB3, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_usb3_1 = { + .name = "slv_qhs_usb3_1", + .id = SC8180X_SLAVE_USB3_1, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_usb3_2 = { + .name = "slv_qhs_usb3_2", + .id = SC8180X_SLAVE_USB3_2, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_venus_cfg = { + .name = "slv_qhs_venus_cfg", + .id = SC8180X_SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg = { + .name = "slv_qhs_vsense_ctrl_cfg", + .id = SC8180X_SLAVE_VSENSE_CTRL_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_srvc_cnoc = { + .name = "slv_srvc_cnoc", + .id = SC8180X_SLAVE_SERVICE_CNOC, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_gemnoc = { + .name = "slv_qhs_gemnoc", + .id = SC8180X_SLAVE_GEM_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8180X_MASTER_GEM_NOC_CFG } +}; + +static struct qcom_icc_node slv_qhs_llcc = { + .name = "slv_qhs_llcc", + .id = SC8180X_SLAVE_LLCC_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg = { + .name = "slv_qhs_mdsp_ms_mpu_cfg", + .id = SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qns_ecc = { + .name = "slv_qns_ecc", + .id = SC8180X_SLAVE_ECC, + .channels = 1, + .buswidth = 32 +}; + +static struct qcom_icc_node slv_qns_gem_noc_snoc = { + .name = "slv_qns_gem_noc_snoc", + .id = SC8180X_SLAVE_GEM_NOC_SNOC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_MASTER_GEM_NOC_SNOC } +}; + +static struct qcom_icc_node slv_qns_llcc = { + .name = "slv_qns_llcc", + .id = SC8180X_SLAVE_LLCC, + .channels = 8, + .buswidth = 16, + .num_links = 1, + .links = { SC8180X_MASTER_LLCC } +}; + +static struct qcom_icc_node slv_srvc_gemnoc = { + .name = "slv_srvc_gemnoc", + .id = SC8180X_SLAVE_SERVICE_GEM_NOC, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_srvc_gemnoc1 = { + .name = "slv_srvc_gemnoc1", + .id = SC8180X_SLAVE_SERVICE_GEM_NOC_1, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_ipa_core_slave = { + .name = "slv_ipa_core_slave", + .id = SC8180X_SLAVE_IPA_CORE, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_ebi = { + .name = "slv_ebi", + .id = SC8180X_SLAVE_EBI_CH0, + .channels = 8, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qns2_mem_noc = { + .name = "slv_qns2_mem_noc", + .id = SC8180X_SLAVE_MNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { 
SC8180X_MASTER_MNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node slv_qns_mem_noc_hf = { + .name = "slv_qns_mem_noc_hf", + .id = SC8180X_SLAVE_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_MASTER_MNOC_HF_MEM_NOC } +}; + +static struct qcom_icc_node slv_srvc_mnoc = { + .name = "slv_srvc_mnoc", + .id = SC8180X_SLAVE_SERVICE_MNOC, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qhs_apss = { + .name = "slv_qhs_apss", + .id = SC8180X_SLAVE_APPSS, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_qns_cnoc = { + .name = "slv_qns_cnoc", + .id = SC8180X_SNOC_CNOC_SLV, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_SNOC_CNOC_MAS } +}; + +static struct qcom_icc_node slv_qns_gemnoc_gc = { + .name = "slv_qns_gemnoc_gc", + .id = SC8180X_SLAVE_SNOC_GEM_NOC_GC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8180X_MASTER_SNOC_GC_MEM_NOC } +}; + +static struct qcom_icc_node slv_qns_gemnoc_sf = { + .name = "slv_qns_gemnoc_sf", + .id = SC8180X_SLAVE_SNOC_GEM_NOC_SF, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8180X_MASTER_SNOC_SF_MEM_NOC } +}; + +static struct qcom_icc_node slv_qxs_imem = { + .name = "slv_qxs_imem", + .id = SC8180X_SLAVE_OCIMEM, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_qxs_pimem = { + .name = "slv_qxs_pimem", + .id = SC8180X_SLAVE_PIMEM, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_srvc_snoc = { + .name = "slv_srvc_snoc", + .id = SC8180X_SLAVE_SERVICE_SNOC, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_xs_pcie_0 = { + .name = "slv_xs_pcie_0", + .id = SC8180X_SLAVE_PCIE_0, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_xs_pcie_1 = { + .name = "slv_xs_pcie_1", + .id = SC8180X_SLAVE_PCIE_1, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_xs_pcie_2 = { + .name = "slv_xs_pcie_2", + .id = SC8180X_SLAVE_PCIE_2, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_xs_pcie_3 = { + .name = "slv_xs_pcie_3", + .id = SC8180X_SLAVE_PCIE_3, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_xs_qdss_stm = { + .name = "slv_xs_qdss_stm", + .id = SC8180X_SLAVE_QDSS_STM, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_xs_sys_tcu_cfg = { + .name = "slv_xs_sys_tcu_cfg", + .id = SC8180X_SLAVE_TCU, + .channels = 1, + .buswidth = 8 +}; + +static struct qcom_icc_node slv_qup_core_0 = { + .name = "slv_qup_core_0", + .id = SC8180X_SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qup_core_1 = { + .name = "slv_qup_core_1", + .id = SC8180X_SLAVE_QUP_CORE_1, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_node slv_qup_core_2 = { + .name = "slv_qup_core_2", + .id = SC8180X_SLAVE_QUP_CORE_2, + .channels = 1, + .buswidth = 4 +}; + +static struct qcom_icc_bcm bcm_acv = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &slv_ebi } +}; + +static struct qcom_icc_bcm bcm_mc0 = { + .name = "MC0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &slv_ebi } +}; + +static struct qcom_icc_bcm bcm_sh0 = { + .name = "SH0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &slv_qns_llcc } +}; + +static struct qcom_icc_bcm bcm_mm0 = { + .name = "MM0", + .num_nodes = 1, + .nodes = { &slv_qns_mem_noc_hf } +}; + +static struct qcom_icc_bcm bcm_co0 = { + .name = "CO0", + .num_nodes = 1, + .nodes = { &slv_qns_cdsp_mem_noc } +}; + 
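/*
 * Editorial comment on the BCM definitions (not part of the patch):
 * struct qcom_icc_bcm uses C designated initializers, so any field
 * omitted above defaults to zero/false. .keepalive = true marks a BCM
 * that must retain a minimum vote even when no consumer is active, and
 * .num_nodes tells the aggregation code how many entries of .nodes[]
 * are valid. Note that, as written here, several BCMs below (bcm_sn0,
 * bcm_sn1, bcm_sn2, bcm_co2, bcm_ip0, bcm_sn3 and bcm_sn4) fill in
 * .nodes[] without setting .num_nodes, leaving it at 0.
 */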
+static struct qcom_icc_bcm bcm_ce0 = { + .name = "CE0", + .num_nodes = 1, + .nodes = { &mas_qxm_crypto } +}; + +static struct qcom_icc_bcm bcm_cn0 = { + .name = "CN0", + .keepalive = true, + .num_nodes = 57, + .nodes = { &mas_qnm_snoc, + &slv_qhs_a1_noc_cfg, + &slv_qhs_a2_noc_cfg, + &slv_qhs_ahb2phy_refgen_center, + &slv_qhs_ahb2phy_refgen_east, + &slv_qhs_ahb2phy_refgen_west, + &slv_qhs_ahb2phy_south, + &slv_qhs_aop, + &slv_qhs_aoss, + &slv_qhs_camera_cfg, + &slv_qhs_clk_ctl, + &slv_qhs_compute_dsp, + &slv_qhs_cpr_cx, + &slv_qhs_cpr_mmcx, + &slv_qhs_cpr_mx, + &slv_qhs_crypto0_cfg, + &slv_qhs_ddrss_cfg, + &slv_qhs_display_cfg, + &slv_qhs_emac_cfg, + &slv_qhs_glm, + &slv_qhs_gpuss_cfg, + &slv_qhs_imem_cfg, + &slv_qhs_ipa, + &slv_qhs_mnoc_cfg, + &slv_qhs_npu_cfg, + &slv_qhs_pcie0_cfg, + &slv_qhs_pcie1_cfg, + &slv_qhs_pcie2_cfg, + &slv_qhs_pcie3_cfg, + &slv_qhs_pdm, + &slv_qhs_pimem_cfg, + &slv_qhs_prng, + &slv_qhs_qdss_cfg, + &slv_qhs_qspi_0, + &slv_qhs_qspi_1, + &slv_qhs_qupv3_east0, + &slv_qhs_qupv3_east1, + &slv_qhs_qupv3_west, + &slv_qhs_sdc2, + &slv_qhs_sdc4, + &slv_qhs_security, + &slv_qhs_snoc_cfg, + &slv_qhs_spss_cfg, + &slv_qhs_tcsr, + &slv_qhs_tlmm_east, + &slv_qhs_tlmm_south, + &slv_qhs_tlmm_west, + &slv_qhs_tsif, + &slv_qhs_ufs_card_cfg, + &slv_qhs_ufs_mem0_cfg, + &slv_qhs_ufs_mem1_cfg, + &slv_qhs_usb3_0, + &slv_qhs_usb3_1, + &slv_qhs_usb3_2, + &slv_qhs_venus_cfg, + &slv_qhs_vsense_ctrl_cfg, + &slv_srvc_cnoc } +}; + +static struct qcom_icc_bcm bcm_mm1 = { + .name = "MM1", + .num_nodes = 7, + .nodes = { &mas_qxm_camnoc_hf0_uncomp, + &mas_qxm_camnoc_hf1_uncomp, + &mas_qxm_camnoc_sf_uncomp, + &mas_qxm_camnoc_hf0, + &mas_qxm_camnoc_hf1, + &mas_qxm_mdp0, + &mas_qxm_mdp1 } +}; + +static struct qcom_icc_bcm bcm_qup0 = { + .name = "QUP0", + .num_nodes = 3, + .nodes = { &mas_qup_core_0, + &mas_qup_core_1, + &mas_qup_core_2 } +}; + +static struct qcom_icc_bcm bcm_sh2 = { + .name = "SH2", + .num_nodes = 1, + .nodes = { &slv_qns_gem_noc_snoc } +}; + +static struct qcom_icc_bcm bcm_mm2 = { + .name = "MM2", + .num_nodes = 6, + .nodes = { &mas_qxm_camnoc_sf, + &mas_qxm_rot, + &mas_qxm_venus0, + &mas_qxm_venus1, + &mas_qxm_venus_arm9, + &slv_qns2_mem_noc } +}; + +static struct qcom_icc_bcm bcm_sh3 = { + .name = "SH3", + .keepalive = true, + .num_nodes = 1, + .nodes = { &mas_acm_apps } +}; + +static struct qcom_icc_bcm bcm_sn0 = { + .name = "SN0", + .nodes = { &slv_qns_gemnoc_sf } +}; + +static struct qcom_icc_bcm bcm_sn1 = { + .name = "SN1", + .nodes = { &slv_qxs_imem } +}; + +static struct qcom_icc_bcm bcm_sn2 = { + .name = "SN2", + .keepalive = true, + .nodes = { &slv_qns_gemnoc_gc } +}; + +static struct qcom_icc_bcm bcm_co2 = { + .name = "CO2", + .nodes = { &mas_qnm_npu } +}; + +static struct qcom_icc_bcm bcm_ip0 = { + .name = "IP0", + .nodes = { &slv_ipa_core_slave } +}; + +static struct qcom_icc_bcm bcm_sn3 = { + .name = "SN3", + .keepalive = true, + .nodes = { &slv_srvc_aggre1_noc, + &slv_qns_cnoc } +}; + +static struct qcom_icc_bcm bcm_sn4 = { + .name = "SN4", + .nodes = { &slv_qxs_pimem } +}; + +static struct qcom_icc_bcm bcm_sn8 = { + .name = "SN8", + .num_nodes = 4, + .nodes = { &slv_xs_pcie_0, + &slv_xs_pcie_1, + &slv_xs_pcie_2, + &slv_xs_pcie_3 } +}; + +static struct qcom_icc_bcm bcm_sn9 = { + .name = "SN9", + .num_nodes = 1, + .nodes = { &mas_qnm_aggre1_noc } +}; + +static struct qcom_icc_bcm bcm_sn11 = { + .name = "SN11", + .num_nodes = 1, + .nodes = { &mas_qnm_aggre2_noc } +}; + +static struct qcom_icc_bcm bcm_sn14 = { + .name = "SN14", + .num_nodes = 1, + .nodes = { 
&slv_qns_pcie_mem_noc } +}; + +static struct qcom_icc_bcm bcm_sn15 = { + .name = "SN15", + .keepalive = true, + .num_nodes = 1, + .nodes = { &mas_qnm_gemnoc } +}; + +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_sn3, &bcm_ce0, - &bcm_qup0, }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_sn14, &bcm_ce0, - &bcm_qup0, }; -static struct qcom_icc_bcm *camnoc_virt_bcms[] = { +static struct qcom_icc_bcm * const camnoc_virt_bcms[] = { &bcm_mm1, }; -static struct qcom_icc_bcm *compute_noc_bcms[] = { +static struct qcom_icc_bcm * const compute_noc_bcms[] = { &bcm_co0, &bcm_co2, }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, }; -static struct qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh2, &bcm_sh3, }; -static struct qcom_icc_bcm *ipa_virt_bcms[] = { +static struct qcom_icc_bcm * const ipa_virt_bcms[] = { &bcm_ip0, }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_mc0, &bcm_acv, }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm2, }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn1, &bcm_sn2, @@ -249,7 +1631,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn15, }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_A1NOC_CFG] = &mas_qhm_a1noc_cfg, [MASTER_UFS_CARD] = &mas_xm_ufs_card, [MASTER_UFS_GEN4] = &mas_xm_ufs_g4, @@ -261,7 +1643,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &slv_srvc_aggre1_noc, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_A2NOC_CFG] = &mas_qhm_a2noc_cfg, [MASTER_QDSS_BAM] = &mas_qhm_qdss_bam, [MASTER_QSPI_0] = &mas_qhm_qspi, @@ -285,19 +1667,19 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &slv_srvc_aggre2_noc, }; -static struct qcom_icc_node *camnoc_virt_nodes[] = { +static struct qcom_icc_node * const camnoc_virt_nodes[] = { [MASTER_CAMNOC_HF0_UNCOMP] = &mas_qxm_camnoc_hf0_uncomp, [MASTER_CAMNOC_HF1_UNCOMP] = &mas_qxm_camnoc_hf1_uncomp, [MASTER_CAMNOC_SF_UNCOMP] = &mas_qxm_camnoc_sf_uncomp, [SLAVE_CAMNOC_UNCOMP] = &slv_qns_camnoc_uncomp, }; -static struct qcom_icc_node *compute_noc_nodes[] = { +static struct qcom_icc_node * const compute_noc_nodes[] = { [MASTER_NPU] = &mas_qnm_npu, [SLAVE_CDSP_MEM_NOC] = &slv_qns_cdsp_mem_noc, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [SNOC_CNOC_MAS] = &mas_qnm_snoc, [SLAVE_A1NOC_CFG] = &slv_qhs_a1_noc_cfg, [SLAVE_A2NOC_CFG] = &slv_qhs_a2_noc_cfg, @@ -357,13 +1739,13 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_SERVICE_CNOC] = &slv_srvc_cnoc, }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { [MASTER_CNOC_DC_NOC] = &mas_qhm_cnoc_dc_noc, [SLAVE_GEM_NOC_CFG] = &slv_qhs_gemnoc, [SLAVE_LLCC_CFG] = &slv_qhs_llcc, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_AMPSS_M0] = &mas_acm_apps, [MASTER_GPU_TCU] = &mas_acm_gpu_tcu, [MASTER_SYS_TCU] = &mas_acm_sys_tcu, @@ -384,17 
+1766,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1, }; -static struct qcom_icc_node *ipa_virt_nodes[] = { +static struct qcom_icc_node * const ipa_virt_nodes[] = { [MASTER_IPA_CORE] = &mas_ipa_core_master, [SLAVE_IPA_CORE] = &slv_ipa_core_slave, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &mas_llcc_mc, [SLAVE_EBI_CH0] = &slv_ebi, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &mas_qhm_mnoc_cfg, [MASTER_CAMNOC_HF0] = &mas_qxm_camnoc_hf0, [MASTER_CAMNOC_HF1] = &mas_qxm_camnoc_hf1, @@ -410,7 +1792,7 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_SNOC_CFG] = &mas_qhm_snoc_cfg, [A1NOC_SNOC_MAS] = &mas_qnm_aggre1_noc, [A2NOC_SNOC_MAS] = &mas_qnm_aggre2_noc, @@ -503,97 +1885,25 @@ static const struct qcom_icc_desc sc8180x_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node **qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate = of_icc_xlate_onecell; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - return icc_provider_del(&qp->provider); -} +static struct qcom_icc_bcm * const qup_virt_bcms[] = { + &bcm_qup0, +}; + +static struct qcom_icc_node *qup_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &mas_qup_core_0, + [MASTER_QUP_CORE_1] = &mas_qup_core_1, + [MASTER_QUP_CORE_2] = &mas_qup_core_2, + [SLAVE_QUP_CORE_0] = &slv_qup_core_0, + 
[SLAVE_QUP_CORE_1] = &slv_qup_core_1, + [SLAVE_QUP_CORE_2] = &slv_qup_core_2, +}; + +static const struct qcom_icc_desc sc8180x_qup_virt = { + .nodes = qup_virt_nodes, + .num_nodes = ARRAY_SIZE(qup_virt_nodes), + .bcms = qup_virt_bcms, + .num_bcms = ARRAY_SIZE(qup_virt_bcms), +}; static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc }, @@ -606,14 +1916,15 @@ static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt }, { .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt }, { .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc }, + { .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt }, { .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc }, { } }; MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sc8180x", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h index e70cf7032f80..2eafd35543c7 100644 --- a/drivers/interconnect/qcom/sc8180x.h +++ b/drivers/interconnect/qcom/sc8180x.h @@ -171,4 +171,11 @@ #define SC8180X_MASTER_OSM_L3_APPS 161 #define SC8180X_SLAVE_OSM_L3 162 +#define SC8180X_MASTER_QUP_CORE_0 163 +#define SC8180X_MASTER_QUP_CORE_1 164 +#define SC8180X_MASTER_QUP_CORE_2 165 +#define SC8180X_SLAVE_QUP_CORE_0 166 +#define SC8180X_SLAVE_QUP_CORE_1 167 +#define SC8180X_SLAVE_QUP_CORE_2 168 + #endif diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c new file mode 100644 index 000000000000..507fe5f89791 --- /dev/null +++ b/drivers/interconnect/qcom/sc8280xp.c @@ -0,0 +1,2438 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022, Linaro Ltd + */ + +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <dt-bindings/interconnect/qcom,sc8280xp.h> + +#include "bcm-voter.h" +#include "icc-rpmh.h" +#include "sc8280xp.h" + +static struct qcom_icc_node qhm_qspi = { + .name = "qhm_qspi", + .id = SC8280XP_MASTER_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup1 = { + .name = "qhm_qup1", + .id = SC8280XP_MASTER_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup2 = { + .name = "qhm_qup2", + .id = SC8280XP_MASTER_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_a1noc_cfg = { + .name = "qnm_a1noc_cfg", + .id = SC8280XP_MASTER_A1NOC_CFG, + .channels = 1, + .buswidth = 4, + .links = { SC8280XP_SLAVE_SERVICE_A1NOC }, +}; + +static struct qcom_icc_node qxm_ipa = { + .name = "qxm_ipa", + .id = SC8280XP_MASTER_IPA, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_emac_1 = { + .name = "xm_emac_1", + .id = SC8280XP_MASTER_EMAC_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc4 = { + .name = "xm_sdc4", + .id = SC8280XP_MASTER_SDCC_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_ufs_mem = { + .name = "xm_ufs_mem", + .id = SC8280XP_MASTER_UFS_MEM, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb3_0 = { + .name = "xm_usb3_0", + .id = SC8280XP_MASTER_USB3_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb3_1 = { + .name = "xm_usb3_1", + .id = SC8280XP_MASTER_USB3_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb3_mp = { + .name = "xm_usb3_mp", + .id = SC8280XP_MASTER_USB3_MP, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb4_host0 = { + .name = "xm_usb4_host0", + .id = SC8280XP_MASTER_USB4_0, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb4_host1 = { + .name = "xm_usb4_host1", + .id = SC8280XP_MASTER_USB4_1, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qdss_bam = { + .name = "qhm_qdss_bam", + .id = SC8280XP_MASTER_QDSS_BAM, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup0 = { + .name = "qhm_qup0", + .id = SC8280XP_MASTER_QUP_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_a2noc_cfg = { + .name = "qnm_a2noc_cfg", + .id = SC8280XP_MASTER_A2NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_SERVICE_A2NOC }, +}; + +static struct 
qcom_icc_node qxm_crypto = { + .name = "qxm_crypto", + .id = SC8280XP_MASTER_CRYPTO, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_sensorss_q6 = { + .name = "qxm_sensorss_q6", + .id = SC8280XP_MASTER_SENSORS_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_sp = { + .name = "qxm_sp", + .id = SC8280XP_MASTER_SP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_emac_0 = { + .name = "xm_emac_0", + .id = SC8280XP_MASTER_EMAC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_pcie3_0 = { + .name = "xm_pcie3_0", + .id = SC8280XP_MASTER_PCIE_0, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_1 = { + .name = "xm_pcie3_1", + .id = SC8280XP_MASTER_PCIE_1, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_2a = { + .name = "xm_pcie3_2a", + .id = SC8280XP_MASTER_PCIE_2A, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_2b = { + .name = "xm_pcie3_2b", + .id = SC8280XP_MASTER_PCIE_2B, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_3a = { + .name = "xm_pcie3_3a", + .id = SC8280XP_MASTER_PCIE_3A, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_3b = { + .name = "xm_pcie3_3b", + .id = SC8280XP_MASTER_PCIE_3B, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_4 = { + .name = "xm_pcie3_4", + .id = SC8280XP_MASTER_PCIE_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_qdss_etr = { + .name = "xm_qdss_etr", + .id = SC8280XP_MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc2 = { + .name = "xm_sdc2", + .id = SC8280XP_MASTER_SDCC_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_ufs_card = { + .name = "xm_ufs_card", + .id = SC8280XP_MASTER_UFS_CARD, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node ipa_core_master = { + .name = "ipa_core_master", + .id = SC8280XP_MASTER_IPA_CORE, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_IPA_CORE }, +}; + +static struct qcom_icc_node qup0_core_master = { + .name = "qup0_core_master", + .id = SC8280XP_MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_QUP_CORE_0 }, +}; + +static struct qcom_icc_node qup1_core_master = { + .name = "qup1_core_master", + .id = SC8280XP_MASTER_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_QUP_CORE_1 }, +}; + +static struct qcom_icc_node qup2_core_master = { + .name = 
"qup2_core_master", + .id = SC8280XP_MASTER_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_QUP_CORE_2 }, +}; + +static struct qcom_icc_node qnm_gemnoc_cnoc = { + .name = "qnm_gemnoc_cnoc", + .id = SC8280XP_MASTER_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 76, + .links = { SC8280XP_SLAVE_AHB2PHY_0, + SC8280XP_SLAVE_AHB2PHY_1, + SC8280XP_SLAVE_AHB2PHY_2, + SC8280XP_SLAVE_AOSS, + SC8280XP_SLAVE_APPSS, + SC8280XP_SLAVE_CAMERA_CFG, + SC8280XP_SLAVE_CLK_CTL, + SC8280XP_SLAVE_CDSP_CFG, + SC8280XP_SLAVE_CDSP1_CFG, + SC8280XP_SLAVE_RBCPR_CX_CFG, + SC8280XP_SLAVE_RBCPR_MMCX_CFG, + SC8280XP_SLAVE_RBCPR_MX_CFG, + SC8280XP_SLAVE_CPR_NSPCX, + SC8280XP_SLAVE_CRYPTO_0_CFG, + SC8280XP_SLAVE_CX_RDPM, + SC8280XP_SLAVE_DCC_CFG, + SC8280XP_SLAVE_DISPLAY_CFG, + SC8280XP_SLAVE_DISPLAY1_CFG, + SC8280XP_SLAVE_EMAC_CFG, + SC8280XP_SLAVE_EMAC1_CFG, + SC8280XP_SLAVE_GFX3D_CFG, + SC8280XP_SLAVE_HWKM, + SC8280XP_SLAVE_IMEM_CFG, + SC8280XP_SLAVE_IPA_CFG, + SC8280XP_SLAVE_IPC_ROUTER_CFG, + SC8280XP_SLAVE_LPASS, + SC8280XP_SLAVE_MX_RDPM, + SC8280XP_SLAVE_MXC_RDPM, + SC8280XP_SLAVE_PCIE_0_CFG, + SC8280XP_SLAVE_PCIE_1_CFG, + SC8280XP_SLAVE_PCIE_2A_CFG, + SC8280XP_SLAVE_PCIE_2B_CFG, + SC8280XP_SLAVE_PCIE_3A_CFG, + SC8280XP_SLAVE_PCIE_3B_CFG, + SC8280XP_SLAVE_PCIE_4_CFG, + SC8280XP_SLAVE_PCIE_RSC_CFG, + SC8280XP_SLAVE_PDM, + SC8280XP_SLAVE_PIMEM_CFG, + SC8280XP_SLAVE_PKA_WRAPPER_CFG, + SC8280XP_SLAVE_PMU_WRAPPER_CFG, + SC8280XP_SLAVE_QDSS_CFG, + SC8280XP_SLAVE_QSPI_0, + SC8280XP_SLAVE_QUP_0, + SC8280XP_SLAVE_QUP_1, + SC8280XP_SLAVE_QUP_2, + SC8280XP_SLAVE_SDCC_2, + SC8280XP_SLAVE_SDCC_4, + SC8280XP_SLAVE_SECURITY, + SC8280XP_SLAVE_SMMUV3_CFG, + SC8280XP_SLAVE_SMSS_CFG, + SC8280XP_SLAVE_SPSS_CFG, + SC8280XP_SLAVE_TCSR, + SC8280XP_SLAVE_TLMM, + SC8280XP_SLAVE_UFS_CARD_CFG, + SC8280XP_SLAVE_UFS_MEM_CFG, + SC8280XP_SLAVE_USB3_0, + SC8280XP_SLAVE_USB3_1, + SC8280XP_SLAVE_USB3_MP, + SC8280XP_SLAVE_USB4_0, + SC8280XP_SLAVE_USB4_1, + SC8280XP_SLAVE_VENUS_CFG, + SC8280XP_SLAVE_VSENSE_CTRL_CFG, + SC8280XP_SLAVE_VSENSE_CTRL_R_CFG, + SC8280XP_SLAVE_A1NOC_CFG, + SC8280XP_SLAVE_A2NOC_CFG, + SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG, + SC8280XP_SLAVE_DDRSS_CFG, + SC8280XP_SLAVE_CNOC_MNOC_CFG, + SC8280XP_SLAVE_SNOC_CFG, + SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG, + SC8280XP_SLAVE_IMEM, + SC8280XP_SLAVE_PIMEM, + SC8280XP_SLAVE_SERVICE_CNOC, + SC8280XP_SLAVE_QDSS_STM, + SC8280XP_SLAVE_SMSS, + SC8280XP_SLAVE_TCU + }, +}; + +static struct qcom_icc_node qnm_gemnoc_pcie = { + .name = "qnm_gemnoc_pcie", + .id = SC8280XP_MASTER_GEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 7, + .links = { SC8280XP_SLAVE_PCIE_0, + SC8280XP_SLAVE_PCIE_1, + SC8280XP_SLAVE_PCIE_2A, + SC8280XP_SLAVE_PCIE_2B, + SC8280XP_SLAVE_PCIE_3A, + SC8280XP_SLAVE_PCIE_3B, + SC8280XP_SLAVE_PCIE_4 + }, +}; + +static struct qcom_icc_node qnm_cnoc_dc_noc = { + .name = "qnm_cnoc_dc_noc", + .id = SC8280XP_MASTER_CNOC_DC_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 2, + .links = { SC8280XP_SLAVE_LLCC_CFG, + SC8280XP_SLAVE_GEM_NOC_CFG + }, +}; + +static struct qcom_icc_node alm_gpu_tcu = { + .name = "alm_gpu_tcu", + .id = SC8280XP_MASTER_GPU_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node alm_pcie_tcu = { + .name = "alm_pcie_tcu", + .id = SC8280XP_MASTER_PCIE_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static 
struct qcom_icc_node alm_sys_tcu = { + .name = "alm_sys_tcu", + .id = SC8280XP_MASTER_SYS_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node chm_apps = { + .name = "chm_apps", + .id = SC8280XP_MASTER_APPSS_PROC, + .channels = 2, + .buswidth = 32, + .num_links = 3, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC, + SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC + }, +}; + +static struct qcom_icc_node qnm_cmpnoc0 = { + .name = "qnm_cmpnoc0", + .id = SC8280XP_MASTER_COMPUTE_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node qnm_cmpnoc1 = { + .name = "qnm_cmpnoc1", + .id = SC8280XP_MASTER_COMPUTE_NOC_1, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node qnm_gemnoc_cfg = { + .name = "qnm_gemnoc_cfg", + .id = SC8280XP_MASTER_GEM_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 3, + .links = { SC8280XP_SLAVE_SERVICE_GEM_NOC_1, + SC8280XP_SLAVE_SERVICE_GEM_NOC_2, + SC8280XP_SLAVE_SERVICE_GEM_NOC + }, +}; + +static struct qcom_icc_node qnm_gpu = { + .name = "qnm_gpu", + .id = SC8280XP_MASTER_GFX3D, + .channels = 4, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node qnm_mnoc_hf = { + .name = "qnm_mnoc_hf", + .id = SC8280XP_MASTER_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_LLCC, + SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC + }, +}; + +static struct qcom_icc_node qnm_mnoc_sf = { + .name = "qnm_mnoc_sf", + .id = SC8280XP_MASTER_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node qnm_pcie = { + .name = "qnm_pcie", + .id = SC8280XP_MASTER_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC + }, +}; + +static struct qcom_icc_node qnm_snoc_gc = { + .name = "qnm_snoc_gc", + .id = SC8280XP_MASTER_SNOC_GC_MEM_NOC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_snoc_sf = { + .name = "qnm_snoc_sf", + .id = SC8280XP_MASTER_SNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { SC8280XP_SLAVE_GEM_NOC_CNOC, + SC8280XP_SLAVE_LLCC, + SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC }, +}; + +static struct qcom_icc_node qhm_config_noc = { + .name = "qhm_config_noc", + .id = SC8280XP_MASTER_CNOC_LPASS_AG_NOC, + .channels = 1, + .buswidth = 4, + .num_links = 6, + .links = { SC8280XP_SLAVE_LPASS_CORE_CFG, + SC8280XP_SLAVE_LPASS_LPI_CFG, + SC8280XP_SLAVE_LPASS_MPU_CFG, + SC8280XP_SLAVE_LPASS_TOP_CFG, + SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC, + SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC + }, +}; + +static struct qcom_icc_node qxm_lpass_dsp = { + .name = "qxm_lpass_dsp", + .id = SC8280XP_MASTER_LPASS_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 4, + .links = { SC8280XP_SLAVE_LPASS_TOP_CFG, + SC8280XP_SLAVE_LPASS_SNOC, + SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC, + SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC + }, +}; + +static struct qcom_icc_node llcc_mc = { + .name = "llcc_mc", + .id = SC8280XP_MASTER_LLCC, + .channels = 8, + .buswidth = 4, + .num_links = 1, + .links = { 
SC8280XP_SLAVE_EBI1 }, +}; + +static struct qcom_icc_node qnm_camnoc_hf = { + .name = "qnm_camnoc_hf", + .id = SC8280XP_MASTER_CAMNOC_HF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp0_0 = { + .name = "qnm_mdp0_0", + .id = SC8280XP_MASTER_MDP0, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp0_1 = { + .name = "qnm_mdp0_1", + .id = SC8280XP_MASTER_MDP1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp1_0 = { + .name = "qnm_mdp1_0", + .id = SC8280XP_MASTER_MDP_CORE1_0, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp1_1 = { + .name = "qnm_mdp1_1", + .id = SC8280XP_MASTER_MDP_CORE1_1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mnoc_cfg = { + .name = "qnm_mnoc_cfg", + .id = SC8280XP_MASTER_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_SERVICE_MNOC }, +}; + +static struct qcom_icc_node qnm_rot_0 = { + .name = "qnm_rot_0", + .id = SC8280XP_MASTER_ROTATOR, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_rot_1 = { + .name = "qnm_rot_1", + .id = SC8280XP_MASTER_ROTATOR_1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video0 = { + .name = "qnm_video0", + .id = SC8280XP_MASTER_VIDEO_P0, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video1 = { + .name = "qnm_video1", + .id = SC8280XP_MASTER_VIDEO_P1, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_cvp = { + .name = "qnm_video_cvp", + .id = SC8280XP_MASTER_VIDEO_PROC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qxm_camnoc_icp = { + .name = "qxm_camnoc_icp", + .id = SC8280XP_MASTER_CAMNOC_ICP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qxm_camnoc_sf = { + .name = "qxm_camnoc_sf", + .id = SC8280XP_MASTER_CAMNOC_SF, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qhm_nsp_noc_config = { + .name = "qhm_nsp_noc_config", + .id = SC8280XP_MASTER_CDSP_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_SERVICE_NSP_NOC }, +}; + +static struct qcom_icc_node qxm_nsp = { + .name = "qxm_nsp", + .id = SC8280XP_MASTER_CDSP_PROC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_CDSP_MEM_NOC, + SC8280XP_SLAVE_NSP_XFR + }, +}; + +static struct qcom_icc_node qhm_nspb_noc_config = { + .name = "qhm_nspb_noc_config", + .id = SC8280XP_MASTER_CDSPB_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_SERVICE_NSPB_NOC }, +}; + +static struct qcom_icc_node qxm_nspb = { + .name = "qxm_nspb", + .id = SC8280XP_MASTER_CDSP_PROC_B, + 
.channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SC8280XP_SLAVE_CDSPB_MEM_NOC, + SC8280XP_SLAVE_NSPB_XFR + }, +}; + +static struct qcom_icc_node qnm_aggre1_noc = { + .name = "qnm_aggre1_noc", + .id = SC8280XP_MASTER_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre2_noc = { + .name = "qnm_aggre2_noc", + .id = SC8280XP_MASTER_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre_usb_noc = { + .name = "qnm_aggre_usb_noc", + .id = SC8280XP_MASTER_USB_NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_lpass_noc = { + .name = "qnm_lpass_noc", + .id = SC8280XP_MASTER_LPASS_ANOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_snoc_cfg = { + .name = "qnm_snoc_cfg", + .id = SC8280XP_MASTER_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_SLAVE_SERVICE_SNOC }, +}; + +static struct qcom_icc_node qxm_pimem = { + .name = "qxm_pimem", + .id = SC8280XP_MASTER_PIMEM, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC }, +}; + +static struct qcom_icc_node xm_gic = { + .name = "xm_gic", + .id = SC8280XP_MASTER_GIC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC }, +}; + +static struct qcom_icc_node qns_a1noc_snoc = { + .name = "qns_a1noc_snoc", + .id = SC8280XP_SLAVE_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qns_aggre_usb_snoc = { + .name = "qns_aggre_usb_snoc", + .id = SC8280XP_SLAVE_USB_NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node srvc_aggre1_noc = { + .name = "srvc_aggre1_noc", + .id = SC8280XP_SLAVE_SERVICE_A1NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_a2noc_snoc = { + .name = "qns_a2noc_snoc", + .id = SC8280XP_SLAVE_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qns_pcie_gem_noc = { + .name = "qns_pcie_gem_noc", + .id = SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_MASTER_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node srvc_aggre2_noc = { + .name = "srvc_aggre2_noc", + .id = SC8280XP_SLAVE_SERVICE_A2NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node ipa_core_slave = { + .name = "ipa_core_slave", + .id = SC8280XP_SLAVE_IPA_CORE, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node qup0_core_slave = { + .name = "qup0_core_slave", + .id = SC8280XP_SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qup1_core_slave = { + .name = "qup1_core_slave", + .id = SC8280XP_SLAVE_QUP_CORE_1, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qup2_core_slave = { + .name = "qup2_core_slave", + .id = SC8280XP_SLAVE_QUP_CORE_2, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_ahb2phy0 = { + .name = "qhs_ahb2phy0", + .id = SC8280XP_SLAVE_AHB2PHY_0, + .channels = 1, + .buswidth = 4, 
+}; + +static struct qcom_icc_node qhs_ahb2phy1 = { + .name = "qhs_ahb2phy1", + .id = SC8280XP_SLAVE_AHB2PHY_1, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_ahb2phy2 = { + .name = "qhs_ahb2phy2", + .id = SC8280XP_SLAVE_AHB2PHY_2, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_aoss = { + .name = "qhs_aoss", + .id = SC8280XP_SLAVE_AOSS, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_apss = { + .name = "qhs_apss", + .id = SC8280XP_SLAVE_APPSS, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node qhs_camera_cfg = { + .name = "qhs_camera_cfg", + .id = SC8280XP_SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_clk_ctl = { + .name = "qhs_clk_ctl", + .id = SC8280XP_SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_compute0_cfg = { + .name = "qhs_compute0_cfg", + .id = SC8280XP_SLAVE_CDSP_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_CDSP_NOC_CFG }, +}; + +static struct qcom_icc_node qhs_compute1_cfg = { + .name = "qhs_compute1_cfg", + .id = SC8280XP_SLAVE_CDSP1_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_CDSPB_NOC_CFG }, +}; + +static struct qcom_icc_node qhs_cpr_cx = { + .name = "qhs_cpr_cx", + .id = SC8280XP_SLAVE_RBCPR_CX_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_cpr_mmcx = { + .name = "qhs_cpr_mmcx", + .id = SC8280XP_SLAVE_RBCPR_MMCX_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_cpr_mx = { + .name = "qhs_cpr_mx", + .id = SC8280XP_SLAVE_RBCPR_MX_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_cpr_nspcx = { + .name = "qhs_cpr_nspcx", + .id = SC8280XP_SLAVE_CPR_NSPCX, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_crypto0_cfg = { + .name = "qhs_crypto0_cfg", + .id = SC8280XP_SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_cx_rdpm = { + .name = "qhs_cx_rdpm", + .id = SC8280XP_SLAVE_CX_RDPM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_dcc_cfg = { + .name = "qhs_dcc_cfg", + .id = SC8280XP_SLAVE_DCC_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_display0_cfg = { + .name = "qhs_display0_cfg", + .id = SC8280XP_SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_display1_cfg = { + .name = "qhs_display1_cfg", + .id = SC8280XP_SLAVE_DISPLAY1_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_emac0_cfg = { + .name = "qhs_emac0_cfg", + .id = SC8280XP_SLAVE_EMAC_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_emac1_cfg = { + .name = "qhs_emac1_cfg", + .id = SC8280XP_SLAVE_EMAC1_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_gpuss_cfg = { + .name = "qhs_gpuss_cfg", + .id = SC8280XP_SLAVE_GFX3D_CFG, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node qhs_hwkm = { + .name = "qhs_hwkm", + .id = SC8280XP_SLAVE_HWKM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_imem_cfg = { + .name = "qhs_imem_cfg", + .id = SC8280XP_SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_ipa = { + .name = "qhs_ipa", + .id = SC8280XP_SLAVE_IPA_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_ipc_router = { + .name = 
"qhs_ipc_router", + .id = SC8280XP_SLAVE_IPC_ROUTER_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_lpass_cfg = { + .name = "qhs_lpass_cfg", + .id = SC8280XP_SLAVE_LPASS, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_CNOC_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qhs_mx_rdpm = { + .name = "qhs_mx_rdpm", + .id = SC8280XP_SLAVE_MX_RDPM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_mxc_rdpm = { + .name = "qhs_mxc_rdpm", + .id = SC8280XP_SLAVE_MXC_RDPM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie0_cfg = { + .name = "qhs_pcie0_cfg", + .id = SC8280XP_SLAVE_PCIE_0_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie1_cfg = { + .name = "qhs_pcie1_cfg", + .id = SC8280XP_SLAVE_PCIE_1_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie2a_cfg = { + .name = "qhs_pcie2a_cfg", + .id = SC8280XP_SLAVE_PCIE_2A_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie2b_cfg = { + .name = "qhs_pcie2b_cfg", + .id = SC8280XP_SLAVE_PCIE_2B_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie3a_cfg = { + .name = "qhs_pcie3a_cfg", + .id = SC8280XP_SLAVE_PCIE_3A_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie3b_cfg = { + .name = "qhs_pcie3b_cfg", + .id = SC8280XP_SLAVE_PCIE_3B_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie4_cfg = { + .name = "qhs_pcie4_cfg", + .id = SC8280XP_SLAVE_PCIE_4_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pcie_rsc_cfg = { + .name = "qhs_pcie_rsc_cfg", + .id = SC8280XP_SLAVE_PCIE_RSC_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pdm = { + .name = "qhs_pdm", + .id = SC8280XP_SLAVE_PDM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pimem_cfg = { + .name = "qhs_pimem_cfg", + .id = SC8280XP_SLAVE_PIMEM_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pka_wrapper_cfg = { + .name = "qhs_pka_wrapper_cfg", + .id = SC8280XP_SLAVE_PKA_WRAPPER_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_pmu_wrapper_cfg = { + .name = "qhs_pmu_wrapper_cfg", + .id = SC8280XP_SLAVE_PMU_WRAPPER_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_qdss_cfg = { + .name = "qhs_qdss_cfg", + .id = SC8280XP_SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_qspi = { + .name = "qhs_qspi", + .id = SC8280XP_SLAVE_QSPI_0, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_qup0 = { + .name = "qhs_qup0", + .id = SC8280XP_SLAVE_QUP_0, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_qup1 = { + .name = "qhs_qup1", + .id = SC8280XP_SLAVE_QUP_1, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_qup2 = { + .name = "qhs_qup2", + .id = SC8280XP_SLAVE_QUP_2, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_sdc2 = { + .name = "qhs_sdc2", + .id = SC8280XP_SLAVE_SDCC_2, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_sdc4 = { + .name = "qhs_sdc4", + .id = SC8280XP_SLAVE_SDCC_4, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_security = { + .name = "qhs_security", + .id = SC8280XP_SLAVE_SECURITY, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node 
qhs_smmuv3_cfg = { + .name = "qhs_smmuv3_cfg", + .id = SC8280XP_SLAVE_SMMUV3_CFG, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node qhs_smss_cfg = { + .name = "qhs_smss_cfg", + .id = SC8280XP_SLAVE_SMSS_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_spss_cfg = { + .name = "qhs_spss_cfg", + .id = SC8280XP_SLAVE_SPSS_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_tcsr = { + .name = "qhs_tcsr", + .id = SC8280XP_SLAVE_TCSR, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_tlmm = { + .name = "qhs_tlmm", + .id = SC8280XP_SLAVE_TLMM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_ufs_card_cfg = { + .name = "qhs_ufs_card_cfg", + .id = SC8280XP_SLAVE_UFS_CARD_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_ufs_mem_cfg = { + .name = "qhs_ufs_mem_cfg", + .id = SC8280XP_SLAVE_UFS_MEM_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_usb3_0 = { + .name = "qhs_usb3_0", + .id = SC8280XP_SLAVE_USB3_0, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_usb3_1 = { + .name = "qhs_usb3_1", + .id = SC8280XP_SLAVE_USB3_1, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_usb3_mp = { + .name = "qhs_usb3_mp", + .id = SC8280XP_SLAVE_USB3_MP, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_usb4_host_0 = { + .name = "qhs_usb4_host_0", + .id = SC8280XP_SLAVE_USB4_0, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_usb4_host_1 = { + .name = "qhs_usb4_host_1", + .id = SC8280XP_SLAVE_USB4_1, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_venus_cfg = { + .name = "qhs_venus_cfg", + .id = SC8280XP_SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_vsense_ctrl_cfg = { + .name = "qhs_vsense_ctrl_cfg", + .id = SC8280XP_SLAVE_VSENSE_CTRL_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_vsense_ctrl_r_cfg = { + .name = "qhs_vsense_ctrl_r_cfg", + .id = SC8280XP_SLAVE_VSENSE_CTRL_R_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_a1_noc_cfg = { + .name = "qns_a1_noc_cfg", + .id = SC8280XP_SLAVE_A1NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_A1NOC_CFG }, +}; + +static struct qcom_icc_node qns_a2_noc_cfg = { + .name = "qns_a2_noc_cfg", + .id = SC8280XP_SLAVE_A2NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_A2NOC_CFG }, +}; + +static struct qcom_icc_node qns_anoc_pcie_bridge_cfg = { + .name = "qns_anoc_pcie_bridge_cfg", + .id = SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_ddrss_cfg = { + .name = "qns_ddrss_cfg", + .id = SC8280XP_SLAVE_DDRSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_CNOC_DC_NOC }, +}; + +static struct qcom_icc_node qns_mnoc_cfg = { + .name = "qns_mnoc_cfg", + .id = SC8280XP_SLAVE_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_CNOC_MNOC_CFG }, +}; + +static struct qcom_icc_node qns_snoc_cfg = { + .name = "qns_snoc_cfg", + .id = SC8280XP_SLAVE_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_SNOC_CFG }, +}; + +static struct qcom_icc_node qns_snoc_sf_bridge_cfg = { + .name = "qns_snoc_sf_bridge_cfg", + .id = 
SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qxs_imem = { + .name = "qxs_imem", + .id = SC8280XP_SLAVE_IMEM, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node qxs_pimem = { + .name = "qxs_pimem", + .id = SC8280XP_SLAVE_PIMEM, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node srvc_cnoc = { + .name = "srvc_cnoc", + .id = SC8280XP_SLAVE_SERVICE_CNOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node xs_pcie_0 = { + .name = "xs_pcie_0", + .id = SC8280XP_SLAVE_PCIE_0, + .channels = 1, + .buswidth = 16, +}; + +static struct qcom_icc_node xs_pcie_1 = { + .name = "xs_pcie_1", + .id = SC8280XP_SLAVE_PCIE_1, + .channels = 1, + .buswidth = 16, +}; + +static struct qcom_icc_node xs_pcie_2a = { + .name = "xs_pcie_2a", + .id = SC8280XP_SLAVE_PCIE_2A, + .channels = 1, + .buswidth = 16, +}; + +static struct qcom_icc_node xs_pcie_2b = { + .name = "xs_pcie_2b", + .id = SC8280XP_SLAVE_PCIE_2B, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node xs_pcie_3a = { + .name = "xs_pcie_3a", + .id = SC8280XP_SLAVE_PCIE_3A, + .channels = 1, + .buswidth = 16, +}; + +static struct qcom_icc_node xs_pcie_3b = { + .name = "xs_pcie_3b", + .id = SC8280XP_SLAVE_PCIE_3B, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node xs_pcie_4 = { + .name = "xs_pcie_4", + .id = SC8280XP_SLAVE_PCIE_4, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node xs_qdss_stm = { + .name = "xs_qdss_stm", + .id = SC8280XP_SLAVE_QDSS_STM, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node xs_smss = { + .name = "xs_smss", + .id = SC8280XP_SLAVE_SMSS, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node xs_sys_tcu_cfg = { + .name = "xs_sys_tcu_cfg", + .id = SC8280XP_SLAVE_TCU, + .channels = 1, + .buswidth = 8, +}; + +static struct qcom_icc_node qhs_llcc = { + .name = "qhs_llcc", + .id = SC8280XP_SLAVE_LLCC_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_gemnoc = { + .name = "qns_gemnoc", + .id = SC8280XP_SLAVE_GEM_NOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SC8280XP_MASTER_GEM_NOC_CFG }, +}; + +static struct qcom_icc_node qns_gem_noc_cnoc = { + .name = "qns_gem_noc_cnoc", + .id = SC8280XP_SLAVE_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_GEM_NOC_CNOC }, +}; + +static struct qcom_icc_node qns_llcc = { + .name = "qns_llcc", + .id = SC8280XP_SLAVE_LLCC, + .channels = 8, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_LLCC }, +}; + +static struct qcom_icc_node qns_pcie = { + .name = "qns_pcie", + .id = SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_GEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node srvc_even_gemnoc = { + .name = "srvc_even_gemnoc", + .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_1, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node srvc_odd_gemnoc = { + .name = "srvc_odd_gemnoc", + .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_2, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node srvc_sys_gemnoc = { + .name = "srvc_sys_gemnoc", + .id = SC8280XP_SLAVE_SERVICE_GEM_NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_lpass_core = { + .name = "qhs_lpass_core", + .id = SC8280XP_SLAVE_LPASS_CORE_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_lpass_lpi = { + .name = 
"qhs_lpass_lpi", + .id = SC8280XP_SLAVE_LPASS_LPI_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_lpass_mpu = { + .name = "qhs_lpass_mpu", + .id = SC8280XP_SLAVE_LPASS_MPU_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qhs_lpass_top = { + .name = "qhs_lpass_top", + .id = SC8280XP_SLAVE_LPASS_TOP_CFG, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_sysnoc = { + .name = "qns_sysnoc", + .id = SC8280XP_SLAVE_LPASS_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_LPASS_ANOC }, +}; + +static struct qcom_icc_node srvc_niu_aml_noc = { + .name = "srvc_niu_aml_noc", + .id = SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node srvc_niu_lpass_agnoc = { + .name = "srvc_niu_lpass_agnoc", + .id = SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node ebi = { + .name = "ebi", + .id = SC8280XP_SLAVE_EBI1, + .channels = 8, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_mem_noc_hf = { + .name = "qns_mem_noc_hf", + .id = SC8280XP_SLAVE_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_MASTER_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qns_mem_noc_sf = { + .name = "qns_mem_noc_sf", + .id = SC8280XP_SLAVE_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_MASTER_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_mnoc = { + .name = "srvc_mnoc", + .id = SC8280XP_SLAVE_SERVICE_MNOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_nsp_gemnoc = { + .name = "qns_nsp_gemnoc", + .id = SC8280XP_SLAVE_CDSP_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_MASTER_COMPUTE_NOC }, +}; + +static struct qcom_icc_node qxs_nsp_xfr = { + .name = "qxs_nsp_xfr", + .id = SC8280XP_SLAVE_NSP_XFR, + .channels = 1, + .buswidth = 32, +}; + +static struct qcom_icc_node service_nsp_noc = { + .name = "service_nsp_noc", + .id = SC8280XP_SLAVE_SERVICE_NSP_NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_nspb_gemnoc = { + .name = "qns_nspb_gemnoc", + .id = SC8280XP_SLAVE_CDSPB_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SC8280XP_MASTER_COMPUTE_NOC_1 }, +}; + +static struct qcom_icc_node qxs_nspb_xfr = { + .name = "qxs_nspb_xfr", + .id = SC8280XP_SLAVE_NSPB_XFR, + .channels = 1, + .buswidth = 32, +}; + +static struct qcom_icc_node service_nspb_noc = { + .name = "service_nspb_noc", + .id = SC8280XP_SLAVE_SERVICE_NSPB_NOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_node qns_gemnoc_gc = { + .name = "qns_gemnoc_gc", + .id = SC8280XP_SLAVE_SNOC_GEM_NOC_GC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SC8280XP_MASTER_SNOC_GC_MEM_NOC }, +}; + +static struct qcom_icc_node qns_gemnoc_sf = { + .name = "qns_gemnoc_sf", + .id = SC8280XP_SLAVE_SNOC_GEM_NOC_SF, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SC8280XP_MASTER_SNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_snoc = { + .name = "srvc_snoc", + .id = SC8280XP_SLAVE_SERVICE_SNOC, + .channels = 1, + .buswidth = 4, +}; + +static struct qcom_icc_bcm bcm_acv = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_ce0 = { + .name = "CE0", + .num_nodes = 1, + .nodes = { &qxm_crypto }, +}; + +static struct qcom_icc_bcm bcm_cn0 = { + .name = "CN0", + 
.keepalive = true, + .num_nodes = 9, + .nodes = { &qnm_gemnoc_cnoc, + &qnm_gemnoc_pcie, + &xs_pcie_0, + &xs_pcie_1, + &xs_pcie_2a, + &xs_pcie_2b, + &xs_pcie_3a, + &xs_pcie_3b, + &xs_pcie_4 + }, +}; + +static struct qcom_icc_bcm bcm_cn1 = { + .name = "CN1", + .num_nodes = 67, + .nodes = { &qhs_ahb2phy0, + &qhs_ahb2phy1, + &qhs_ahb2phy2, + &qhs_aoss, + &qhs_apss, + &qhs_camera_cfg, + &qhs_clk_ctl, + &qhs_compute0_cfg, + &qhs_compute1_cfg, + &qhs_cpr_cx, + &qhs_cpr_mmcx, + &qhs_cpr_mx, + &qhs_cpr_nspcx, + &qhs_crypto0_cfg, + &qhs_cx_rdpm, + &qhs_dcc_cfg, + &qhs_display0_cfg, + &qhs_display1_cfg, + &qhs_emac0_cfg, + &qhs_emac1_cfg, + &qhs_gpuss_cfg, + &qhs_hwkm, + &qhs_imem_cfg, + &qhs_ipa, + &qhs_ipc_router, + &qhs_lpass_cfg, + &qhs_mx_rdpm, + &qhs_mxc_rdpm, + &qhs_pcie0_cfg, + &qhs_pcie1_cfg, + &qhs_pcie2a_cfg, + &qhs_pcie2b_cfg, + &qhs_pcie3a_cfg, + &qhs_pcie3b_cfg, + &qhs_pcie4_cfg, + &qhs_pcie_rsc_cfg, + &qhs_pdm, + &qhs_pimem_cfg, + &qhs_pka_wrapper_cfg, + &qhs_pmu_wrapper_cfg, + &qhs_qdss_cfg, + &qhs_sdc2, + &qhs_sdc4, + &qhs_security, + &qhs_smmuv3_cfg, + &qhs_smss_cfg, + &qhs_spss_cfg, + &qhs_tcsr, + &qhs_tlmm, + &qhs_ufs_card_cfg, + &qhs_ufs_mem_cfg, + &qhs_usb3_0, + &qhs_usb3_1, + &qhs_usb3_mp, + &qhs_usb4_host_0, + &qhs_usb4_host_1, + &qhs_venus_cfg, + &qhs_vsense_ctrl_cfg, + &qhs_vsense_ctrl_r_cfg, + &qns_a1_noc_cfg, + &qns_a2_noc_cfg, + &qns_anoc_pcie_bridge_cfg, + &qns_ddrss_cfg, + &qns_mnoc_cfg, + &qns_snoc_cfg, + &qns_snoc_sf_bridge_cfg, + &srvc_cnoc + }, +}; + +static struct qcom_icc_bcm bcm_cn2 = { + .name = "CN2", + .num_nodes = 4, + .nodes = { &qhs_qspi, + &qhs_qup0, + &qhs_qup1, + &qhs_qup2 + }, +}; + +static struct qcom_icc_bcm bcm_cn3 = { + .name = "CN3", + .num_nodes = 3, + .nodes = { &qxs_imem, + &xs_smss, + &xs_sys_tcu_cfg + }, +}; + +static struct qcom_icc_bcm bcm_ip0 = { + .name = "IP0", + .num_nodes = 1, + .nodes = { &ipa_core_slave }, +}; + +static struct qcom_icc_bcm bcm_mc0 = { + .name = "MC0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_mm0 = { + .name = "MM0", + .keepalive = true, + .num_nodes = 5, + .nodes = { &qnm_camnoc_hf, + &qnm_mdp0_0, + &qnm_mdp0_1, + &qnm_mdp1_0, + &qns_mem_noc_hf + }, +}; + +static struct qcom_icc_bcm bcm_mm1 = { + .name = "MM1", + .num_nodes = 8, + .nodes = { &qnm_rot_0, + &qnm_rot_1, + &qnm_video0, + &qnm_video1, + &qnm_video_cvp, + &qxm_camnoc_icp, + &qxm_camnoc_sf, + &qns_mem_noc_sf + }, +}; + +static struct qcom_icc_bcm bcm_nsa0 = { + .name = "NSA0", + .num_nodes = 2, + .nodes = { &qns_nsp_gemnoc, + &qxs_nsp_xfr + }, +}; + +static struct qcom_icc_bcm bcm_nsa1 = { + .name = "NSA1", + .num_nodes = 1, + .nodes = { &qxm_nsp }, +}; + +static struct qcom_icc_bcm bcm_nsb0 = { + .name = "NSB0", + .num_nodes = 2, + .nodes = { &qns_nspb_gemnoc, + &qxs_nspb_xfr + }, +}; + +static struct qcom_icc_bcm bcm_nsb1 = { + .name = "NSB1", + .num_nodes = 1, + .nodes = { &qxm_nspb }, +}; + +static struct qcom_icc_bcm bcm_pci0 = { + .name = "PCI0", + .num_nodes = 1, + .nodes = { &qns_pcie_gem_noc }, +}; + +static struct qcom_icc_bcm bcm_qup0 = { + .name = "QUP0", + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup0_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup1 = { + .name = "QUP1", + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup1_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup2 = { + .name = "QUP2", + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup2_core_slave }, +}; + +static struct qcom_icc_bcm bcm_sh0 = { + .name = "SH0", + .keepalive = true, + .num_nodes = 1, + 
.nodes = { &qns_llcc }, +}; + +static struct qcom_icc_bcm bcm_sh2 = { + .name = "SH2", + .num_nodes = 1, + .nodes = { &chm_apps }, +}; + +static struct qcom_icc_bcm bcm_sn0 = { + .name = "SN0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_gemnoc_sf }, +}; + +static struct qcom_icc_bcm bcm_sn1 = { + .name = "SN1", + .num_nodes = 1, + .nodes = { &qns_gemnoc_gc }, +}; + +static struct qcom_icc_bcm bcm_sn2 = { + .name = "SN2", + .num_nodes = 1, + .nodes = { &qxs_pimem }, +}; + +static struct qcom_icc_bcm bcm_sn3 = { + .name = "SN3", + .num_nodes = 2, + .nodes = { &qns_a1noc_snoc, + &qnm_aggre1_noc + }, +}; + +static struct qcom_icc_bcm bcm_sn4 = { + .name = "SN4", + .num_nodes = 2, + .nodes = { &qns_a2noc_snoc, + &qnm_aggre2_noc + }, +}; + +static struct qcom_icc_bcm bcm_sn5 = { + .name = "SN5", + .num_nodes = 2, + .nodes = { &qns_aggre_usb_snoc, + &qnm_aggre_usb_noc + }, +}; + +static struct qcom_icc_bcm bcm_sn9 = { + .name = "SN9", + .num_nodes = 2, + .nodes = { &qns_sysnoc, + &qnm_lpass_noc + }, +}; + +static struct qcom_icc_bcm bcm_sn10 = { + .name = "SN10", + .num_nodes = 1, + .nodes = { &xs_qdss_stm }, +}; + +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { + &bcm_sn3, + &bcm_sn5, +}; + +static struct qcom_icc_node * const aggre1_noc_nodes[] = { + [MASTER_QSPI_0] = &qhm_qspi, + [MASTER_QUP_1] = &qhm_qup1, + [MASTER_QUP_2] = &qhm_qup2, + [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg, + [MASTER_IPA] = &qxm_ipa, + [MASTER_EMAC_1] = &xm_emac_1, + [MASTER_SDCC_4] = &xm_sdc4, + [MASTER_UFS_MEM] = &xm_ufs_mem, + [MASTER_USB3_0] = &xm_usb3_0, + [MASTER_USB3_1] = &xm_usb3_1, + [MASTER_USB3_MP] = &xm_usb3_mp, + [MASTER_USB4_0] = &xm_usb4_host0, + [MASTER_USB4_1] = &xm_usb4_host1, + [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc, + [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc, + [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, +}; + +static const struct qcom_icc_desc sc8280xp_aggre1_noc = { + .nodes = aggre1_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), + .bcms = aggre1_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), +}; + +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { + &bcm_ce0, + &bcm_pci0, + &bcm_sn4, +}; + +static struct qcom_icc_node * const aggre2_noc_nodes[] = { + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_QUP_0] = &qhm_qup0, + [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_SENSORS_PROC] = &qxm_sensorss_q6, + [MASTER_SP] = &qxm_sp, + [MASTER_EMAC] = &xm_emac_0, + [MASTER_PCIE_0] = &xm_pcie3_0, + [MASTER_PCIE_1] = &xm_pcie3_1, + [MASTER_PCIE_2A] = &xm_pcie3_2a, + [MASTER_PCIE_2B] = &xm_pcie3_2b, + [MASTER_PCIE_3A] = &xm_pcie3_3a, + [MASTER_PCIE_3B] = &xm_pcie3_3b, + [MASTER_PCIE_4] = &xm_pcie3_4, + [MASTER_QDSS_ETR] = &xm_qdss_etr, + [MASTER_SDCC_2] = &xm_sdc2, + [MASTER_UFS_CARD] = &xm_ufs_card, + [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc, + [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gem_noc, + [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, +}; + +static const struct qcom_icc_desc sc8280xp_aggre2_noc = { + .nodes = aggre2_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), + .bcms = aggre2_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), +}; + +static struct qcom_icc_bcm * const clk_virt_bcms[] = { + &bcm_ip0, + &bcm_qup0, + &bcm_qup1, + &bcm_qup2, +}; + +static struct qcom_icc_node * const clk_virt_nodes[] = { + [MASTER_IPA_CORE] = &ipa_core_master, + [MASTER_QUP_CORE_0] = &qup0_core_master, + [MASTER_QUP_CORE_1] = &qup1_core_master, + [MASTER_QUP_CORE_2] = &qup2_core_master, + [SLAVE_IPA_CORE] = &ipa_core_slave, + [SLAVE_QUP_CORE_0] = 
&qup0_core_slave, + [SLAVE_QUP_CORE_1] = &qup1_core_slave, + [SLAVE_QUP_CORE_2] = &qup2_core_slave, +}; + +static const struct qcom_icc_desc sc8280xp_clk_virt = { + .nodes = clk_virt_nodes, + .num_nodes = ARRAY_SIZE(clk_virt_nodes), + .bcms = clk_virt_bcms, + .num_bcms = ARRAY_SIZE(clk_virt_bcms), +}; + +static struct qcom_icc_bcm * const config_noc_bcms[] = { + &bcm_cn0, + &bcm_cn1, + &bcm_cn2, + &bcm_cn3, + &bcm_sn2, + &bcm_sn10, +}; + +static struct qcom_icc_node * const config_noc_nodes[] = { + [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, + [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, + [SLAVE_AHB2PHY_0] = &qhs_ahb2phy0, + [SLAVE_AHB2PHY_1] = &qhs_ahb2phy1, + [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_APPSS] = &qhs_apss, + [SLAVE_CAMERA_CFG] = &qhs_camera_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_CDSP_CFG] = &qhs_compute0_cfg, + [SLAVE_CDSP1_CFG] = &qhs_compute1_cfg, + [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, + [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx, + [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx, + [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_CX_RDPM] = &qhs_cx_rdpm, + [SLAVE_DCC_CFG] = &qhs_dcc_cfg, + [SLAVE_DISPLAY_CFG] = &qhs_display0_cfg, + [SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg, + [SLAVE_EMAC_CFG] = &qhs_emac0_cfg, + [SLAVE_EMAC1_CFG] = &qhs_emac1_cfg, + [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg, + [SLAVE_HWKM] = &qhs_hwkm, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPA_CFG] = &qhs_ipa, + [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router, + [SLAVE_LPASS] = &qhs_lpass_cfg, + [SLAVE_MX_RDPM] = &qhs_mx_rdpm, + [SLAVE_MXC_RDPM] = &qhs_mxc_rdpm, + [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg, + [SLAVE_PCIE_2A_CFG] = &qhs_pcie2a_cfg, + [SLAVE_PCIE_2B_CFG] = &qhs_pcie2b_cfg, + [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg, + [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg, + [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg, + [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg, + [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg, + [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QSPI_0] = &qhs_qspi, + [SLAVE_QUP_0] = &qhs_qup0, + [SLAVE_QUP_1] = &qhs_qup1, + [SLAVE_QUP_2] = &qhs_qup2, + [SLAVE_SDCC_2] = &qhs_sdc2, + [SLAVE_SDCC_4] = &qhs_sdc4, + [SLAVE_SECURITY] = &qhs_security, + [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg, + [SLAVE_SMSS_CFG] = &qhs_smss_cfg, + [SLAVE_SPSS_CFG] = &qhs_spss_cfg, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg, + [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg, + [SLAVE_USB3_0] = &qhs_usb3_0, + [SLAVE_USB3_1] = &qhs_usb3_1, + [SLAVE_USB3_MP] = &qhs_usb3_mp, + [SLAVE_USB4_0] = &qhs_usb4_host_0, + [SLAVE_USB4_1] = &qhs_usb4_host_1, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, + [SLAVE_VSENSE_CTRL_R_CFG] = &qhs_vsense_ctrl_r_cfg, + [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg, + [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg, + [SLAVE_ANOC_PCIE_BRIDGE_CFG] = &qns_anoc_pcie_bridge_cfg, + [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg, + [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg, + [SLAVE_SNOC_CFG] = &qns_snoc_cfg, + [SLAVE_SNOC_SF_BRIDGE_CFG] = &qns_snoc_sf_bridge_cfg, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_PIMEM] = &qxs_pimem, + [SLAVE_SERVICE_CNOC] = &srvc_cnoc, + [SLAVE_PCIE_0] = &xs_pcie_0, + [SLAVE_PCIE_1] = &xs_pcie_1, + [SLAVE_PCIE_2A] = &xs_pcie_2a, + [SLAVE_PCIE_2B] = &xs_pcie_2b, + [SLAVE_PCIE_3A] = &xs_pcie_3a, + [SLAVE_PCIE_3B] = &xs_pcie_3b, + [SLAVE_PCIE_4] = 
&xs_pcie_4, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_SMSS] = &xs_smss, + [SLAVE_TCU] = &xs_sys_tcu_cfg, +}; + +static const struct qcom_icc_desc sc8280xp_config_noc = { + .nodes = config_noc_nodes, + .num_nodes = ARRAY_SIZE(config_noc_nodes), + .bcms = config_noc_bcms, + .num_bcms = ARRAY_SIZE(config_noc_bcms), +}; + +static struct qcom_icc_bcm * const dc_noc_bcms[] = { +}; + +static struct qcom_icc_node * const dc_noc_nodes[] = { + [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc, + [SLAVE_LLCC_CFG] = &qhs_llcc, + [SLAVE_GEM_NOC_CFG] = &qns_gemnoc, +}; + +static const struct qcom_icc_desc sc8280xp_dc_noc = { + .nodes = dc_noc_nodes, + .num_nodes = ARRAY_SIZE(dc_noc_nodes), + .bcms = dc_noc_bcms, + .num_bcms = ARRAY_SIZE(dc_noc_bcms), +}; + +static struct qcom_icc_bcm * const gem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh2, +}; + +static struct qcom_icc_node * const gem_noc_nodes[] = { + [MASTER_GPU_TCU] = &alm_gpu_tcu, + [MASTER_PCIE_TCU] = &alm_pcie_tcu, + [MASTER_SYS_TCU] = &alm_sys_tcu, + [MASTER_APPSS_PROC] = &chm_apps, + [MASTER_COMPUTE_NOC] = &qnm_cmpnoc0, + [MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1, + [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg, + [MASTER_GFX3D] = &qnm_gpu, + [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf, + [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie, + [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, + [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, + [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie, + [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc, + [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc, + [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc, +}; + +static const struct qcom_icc_desc sc8280xp_gem_noc = { + .nodes = gem_noc_nodes, + .num_nodes = ARRAY_SIZE(gem_noc_nodes), + .bcms = gem_noc_bcms, + .num_bcms = ARRAY_SIZE(gem_noc_bcms), +}; + +static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = { + &bcm_sn9, +}; + +static struct qcom_icc_node * const lpass_ag_noc_nodes[] = { + [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc, + [MASTER_LPASS_PROC] = &qxm_lpass_dsp, + [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core, + [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi, + [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu, + [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top, + [SLAVE_LPASS_SNOC] = &qns_sysnoc, + [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc, + [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc, +}; + +static const struct qcom_icc_desc sc8280xp_lpass_ag_noc = { + .nodes = lpass_ag_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), + .bcms = lpass_ag_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), +}; + +static struct qcom_icc_bcm * const mc_virt_bcms[] = { + &bcm_acv, + &bcm_mc0, +}; + +static struct qcom_icc_node * const mc_virt_nodes[] = { + [MASTER_LLCC] = &llcc_mc, + [SLAVE_EBI1] = &ebi, +}; + +static const struct qcom_icc_desc sc8280xp_mc_virt = { + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), +}; + +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { + &bcm_mm0, + &bcm_mm1, +}; + +static struct qcom_icc_node * const mmss_noc_nodes[] = { + [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, + [MASTER_MDP0] = &qnm_mdp0_0, + [MASTER_MDP1] = &qnm_mdp0_1, + [MASTER_MDP_CORE1_0] = &qnm_mdp1_0, + [MASTER_MDP_CORE1_1] = &qnm_mdp1_1, + [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg, + [MASTER_ROTATOR] = &qnm_rot_0, + [MASTER_ROTATOR_1] = &qnm_rot_1, + [MASTER_VIDEO_P0] = &qnm_video0, + [MASTER_VIDEO_P1] = &qnm_video1, + [MASTER_VIDEO_PROC] = &qnm_video_cvp, + 
[MASTER_CAMNOC_ICP] = &qxm_camnoc_icp, + [MASTER_CAMNOC_SF] = &qxm_camnoc_sf, + [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, + [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf, + [SLAVE_SERVICE_MNOC] = &srvc_mnoc, +}; + +static const struct qcom_icc_desc sc8280xp_mmss_noc = { + .nodes = mmss_noc_nodes, + .num_nodes = ARRAY_SIZE(mmss_noc_nodes), + .bcms = mmss_noc_bcms, + .num_bcms = ARRAY_SIZE(mmss_noc_bcms), +}; + +static struct qcom_icc_bcm * const nspa_noc_bcms[] = { + &bcm_nsa0, + &bcm_nsa1, +}; + +static struct qcom_icc_node * const nspa_noc_nodes[] = { + [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config, + [MASTER_CDSP_PROC] = &qxm_nsp, + [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, + [SLAVE_NSP_XFR] = &qxs_nsp_xfr, + [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc, +}; + +static const struct qcom_icc_desc sc8280xp_nspa_noc = { + .nodes = nspa_noc_nodes, + .num_nodes = ARRAY_SIZE(nspa_noc_nodes), + .bcms = nspa_noc_bcms, + .num_bcms = ARRAY_SIZE(nspa_noc_bcms), +}; + +static struct qcom_icc_bcm * const nspb_noc_bcms[] = { + &bcm_nsb0, + &bcm_nsb1, +}; + +static struct qcom_icc_node * const nspb_noc_nodes[] = { + [MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config, + [MASTER_CDSP_PROC_B] = &qxm_nspb, + [SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc, + [SLAVE_NSPB_XFR] = &qxs_nspb_xfr, + [SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc, +}; + +static const struct qcom_icc_desc sc8280xp_nspb_noc = { + .nodes = nspb_noc_nodes, + .num_nodes = ARRAY_SIZE(nspb_noc_nodes), + .bcms = nspb_noc_bcms, + .num_bcms = ARRAY_SIZE(nspb_noc_bcms), +}; + +static struct qcom_icc_bcm * const system_noc_main_bcms[] = { + &bcm_sn0, + &bcm_sn1, + &bcm_sn3, + &bcm_sn4, + &bcm_sn5, + &bcm_sn9, +}; + +static struct qcom_icc_node * const system_noc_main_nodes[] = { + [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, + [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, + [MASTER_USB_NOC_SNOC] = &qnm_aggre_usb_noc, + [MASTER_LPASS_ANOC] = &qnm_lpass_noc, + [MASTER_SNOC_CFG] = &qnm_snoc_cfg, + [MASTER_PIMEM] = &qxm_pimem, + [MASTER_GIC] = &xm_gic, + [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc, + [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf, + [SLAVE_SERVICE_SNOC] = &srvc_snoc, +}; + +static const struct qcom_icc_desc sc8280xp_system_noc_main = { + .nodes = system_noc_main_nodes, + .num_nodes = ARRAY_SIZE(system_noc_main_nodes), + .bcms = system_noc_main_bcms, + .num_bcms = ARRAY_SIZE(system_noc_main_bcms), +}; + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,sc8280xp-aggre1-noc", .data = &sc8280xp_aggre1_noc, }, + { .compatible = "qcom,sc8280xp-aggre2-noc", .data = &sc8280xp_aggre2_noc, }, + { .compatible = "qcom,sc8280xp-clk-virt", .data = &sc8280xp_clk_virt, }, + { .compatible = "qcom,sc8280xp-config-noc", .data = &sc8280xp_config_noc, }, + { .compatible = "qcom,sc8280xp-dc-noc", .data = &sc8280xp_dc_noc, }, + { .compatible = "qcom,sc8280xp-gem-noc", .data = &sc8280xp_gem_noc, }, + { .compatible = "qcom,sc8280xp-lpass-ag-noc", .data = &sc8280xp_lpass_ag_noc, }, + { .compatible = "qcom,sc8280xp-mc-virt", .data = &sc8280xp_mc_virt, }, + { .compatible = "qcom,sc8280xp-mmss-noc", .data = &sc8280xp_mmss_noc, }, + { .compatible = "qcom,sc8280xp-nspa-noc", .data = &sc8280xp_nspa_noc, }, + { .compatible = "qcom,sc8280xp-nspb-noc", .data = &sc8280xp_nspb_noc, }, + { .compatible = "qcom,sc8280xp-system-noc", .data = &sc8280xp_system_noc_main, }, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, + .driver = { + .name = "qnoc-sc8280xp", + 
.of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + }, +}; + +static int __init qnoc_driver_init(void) +{ + return platform_driver_register(&qnoc_driver); +} +core_initcall(qnoc_driver_init); + +static void __exit qnoc_driver_exit(void) +{ + platform_driver_unregister(&qnoc_driver); +} +module_exit(qnoc_driver_exit); + +MODULE_DESCRIPTION("Qualcomm SC8280XP NoC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/qcom/sc8280xp.h b/drivers/interconnect/qcom/sc8280xp.h new file mode 100644 index 000000000000..74d8fa412d65 --- /dev/null +++ b/drivers/interconnect/qcom/sc8280xp.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H +#define __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H + +#define SC8280XP_MASTER_GPU_TCU 0 +#define SC8280XP_MASTER_PCIE_TCU 1 +#define SC8280XP_MASTER_SYS_TCU 2 +#define SC8280XP_MASTER_APPSS_PROC 3 +#define SC8280XP_MASTER_IPA_CORE 4 +#define SC8280XP_MASTER_LLCC 5 +#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC 6 +#define SC8280XP_MASTER_CDSP_NOC_CFG 7 +#define SC8280XP_MASTER_CDSPB_NOC_CFG 8 +#define SC8280XP_MASTER_QDSS_BAM 9 +#define SC8280XP_MASTER_QSPI_0 10 +#define SC8280XP_MASTER_QUP_0 11 +#define SC8280XP_MASTER_QUP_1 12 +#define SC8280XP_MASTER_QUP_2 13 +#define SC8280XP_MASTER_A1NOC_CFG 14 +#define SC8280XP_MASTER_A2NOC_CFG 15 +#define SC8280XP_MASTER_A1NOC_SNOC 16 +#define SC8280XP_MASTER_A2NOC_SNOC 17 +#define SC8280XP_MASTER_USB_NOC_SNOC 18 +#define SC8280XP_MASTER_CAMNOC_HF 19 +#define SC8280XP_MASTER_COMPUTE_NOC 20 +#define SC8280XP_MASTER_COMPUTE_NOC_1 21 +#define SC8280XP_MASTER_CNOC_DC_NOC 22 +#define SC8280XP_MASTER_GEM_NOC_CFG 23 +#define SC8280XP_MASTER_GEM_NOC_CNOC 24 +#define SC8280XP_MASTER_GEM_NOC_PCIE_SNOC 25 +#define SC8280XP_MASTER_GFX3D 26 +#define SC8280XP_MASTER_LPASS_ANOC 27 +#define SC8280XP_MASTER_MDP0 28 +#define SC8280XP_MASTER_MDP1 29 +#define SC8280XP_MASTER_MDP_CORE1_0 30 +#define SC8280XP_MASTER_MDP_CORE1_1 31 +#define SC8280XP_MASTER_CNOC_MNOC_CFG 32 +#define SC8280XP_MASTER_MNOC_HF_MEM_NOC 33 +#define SC8280XP_MASTER_MNOC_SF_MEM_NOC 34 +#define SC8280XP_MASTER_ANOC_PCIE_GEM_NOC 35 +#define SC8280XP_MASTER_ROTATOR 36 +#define SC8280XP_MASTER_ROTATOR_1 37 +#define SC8280XP_MASTER_SNOC_CFG 38 +#define SC8280XP_MASTER_SNOC_GC_MEM_NOC 39 +#define SC8280XP_MASTER_SNOC_SF_MEM_NOC 40 +#define SC8280XP_MASTER_VIDEO_P0 41 +#define SC8280XP_MASTER_VIDEO_P1 42 +#define SC8280XP_MASTER_VIDEO_PROC 43 +#define SC8280XP_MASTER_QUP_CORE_0 44 +#define SC8280XP_MASTER_QUP_CORE_1 45 +#define SC8280XP_MASTER_QUP_CORE_2 46 +#define SC8280XP_MASTER_CAMNOC_ICP 47 +#define SC8280XP_MASTER_CAMNOC_SF 48 +#define SC8280XP_MASTER_CRYPTO 49 +#define SC8280XP_MASTER_IPA 50 +#define SC8280XP_MASTER_LPASS_PROC 51 +#define SC8280XP_MASTER_CDSP_PROC 52 +#define SC8280XP_MASTER_CDSP_PROC_B 53 +#define SC8280XP_MASTER_PIMEM 54 +#define SC8280XP_MASTER_SENSORS_PROC 55 +#define SC8280XP_MASTER_SP 56 +#define SC8280XP_MASTER_EMAC 57 +#define SC8280XP_MASTER_EMAC_1 58 +#define SC8280XP_MASTER_GIC 59 +#define SC8280XP_MASTER_PCIE_0 60 +#define SC8280XP_MASTER_PCIE_1 61 +#define SC8280XP_MASTER_PCIE_2A 62 +#define SC8280XP_MASTER_PCIE_2B 63 +#define SC8280XP_MASTER_PCIE_3A 64 +#define SC8280XP_MASTER_PCIE_3B 65 +#define SC8280XP_MASTER_PCIE_4 66 +#define SC8280XP_MASTER_QDSS_ETR 67 +#define SC8280XP_MASTER_SDCC_2 68 +#define SC8280XP_MASTER_SDCC_4 69 +#define SC8280XP_MASTER_UFS_CARD 70 +#define 
SC8280XP_MASTER_UFS_MEM 71 +#define SC8280XP_MASTER_USB3_0 72 +#define SC8280XP_MASTER_USB3_1 73 +#define SC8280XP_MASTER_USB3_MP 74 +#define SC8280XP_MASTER_USB4_0 75 +#define SC8280XP_MASTER_USB4_1 76 +#define SC8280XP_SLAVE_EBI1 512 +#define SC8280XP_SLAVE_IPA_CORE 513 +#define SC8280XP_SLAVE_AHB2PHY_0 514 +#define SC8280XP_SLAVE_AHB2PHY_1 515 +#define SC8280XP_SLAVE_AHB2PHY_2 516 +#define SC8280XP_SLAVE_AOSS 517 +#define SC8280XP_SLAVE_APPSS 518 +#define SC8280XP_SLAVE_CAMERA_CFG 519 +#define SC8280XP_SLAVE_CLK_CTL 520 +#define SC8280XP_SLAVE_CDSP_CFG 521 +#define SC8280XP_SLAVE_CDSP1_CFG 522 +#define SC8280XP_SLAVE_RBCPR_CX_CFG 523 +#define SC8280XP_SLAVE_RBCPR_MMCX_CFG 524 +#define SC8280XP_SLAVE_RBCPR_MX_CFG 525 +#define SC8280XP_SLAVE_CPR_NSPCX 526 +#define SC8280XP_SLAVE_CRYPTO_0_CFG 527 +#define SC8280XP_SLAVE_CX_RDPM 528 +#define SC8280XP_SLAVE_DCC_CFG 529 +#define SC8280XP_SLAVE_DISPLAY_CFG 530 +#define SC8280XP_SLAVE_DISPLAY1_CFG 531 +#define SC8280XP_SLAVE_EMAC_CFG 532 +#define SC8280XP_SLAVE_EMAC1_CFG 533 +#define SC8280XP_SLAVE_GFX3D_CFG 534 +#define SC8280XP_SLAVE_HWKM 535 +#define SC8280XP_SLAVE_IMEM_CFG 536 +#define SC8280XP_SLAVE_IPA_CFG 537 +#define SC8280XP_SLAVE_IPC_ROUTER_CFG 538 +#define SC8280XP_SLAVE_LLCC_CFG 539 +#define SC8280XP_SLAVE_LPASS 540 +#define SC8280XP_SLAVE_LPASS_CORE_CFG 541 +#define SC8280XP_SLAVE_LPASS_LPI_CFG 542 +#define SC8280XP_SLAVE_LPASS_MPU_CFG 543 +#define SC8280XP_SLAVE_LPASS_TOP_CFG 544 +#define SC8280XP_SLAVE_MX_RDPM 545 +#define SC8280XP_SLAVE_MXC_RDPM 546 +#define SC8280XP_SLAVE_PCIE_0_CFG 547 +#define SC8280XP_SLAVE_PCIE_1_CFG 548 +#define SC8280XP_SLAVE_PCIE_2A_CFG 549 +#define SC8280XP_SLAVE_PCIE_2B_CFG 550 +#define SC8280XP_SLAVE_PCIE_3A_CFG 551 +#define SC8280XP_SLAVE_PCIE_3B_CFG 552 +#define SC8280XP_SLAVE_PCIE_4_CFG 553 +#define SC8280XP_SLAVE_PCIE_RSC_CFG 554 +#define SC8280XP_SLAVE_PDM 555 +#define SC8280XP_SLAVE_PIMEM_CFG 556 +#define SC8280XP_SLAVE_PKA_WRAPPER_CFG 557 +#define SC8280XP_SLAVE_PMU_WRAPPER_CFG 558 +#define SC8280XP_SLAVE_QDSS_CFG 559 +#define SC8280XP_SLAVE_QSPI_0 560 +#define SC8280XP_SLAVE_QUP_0 561 +#define SC8280XP_SLAVE_QUP_1 562 +#define SC8280XP_SLAVE_QUP_2 563 +#define SC8280XP_SLAVE_SDCC_2 564 +#define SC8280XP_SLAVE_SDCC_4 565 +#define SC8280XP_SLAVE_SECURITY 566 +#define SC8280XP_SLAVE_SMMUV3_CFG 567 +#define SC8280XP_SLAVE_SMSS_CFG 568 +#define SC8280XP_SLAVE_SPSS_CFG 569 +#define SC8280XP_SLAVE_TCSR 570 +#define SC8280XP_SLAVE_TLMM 571 +#define SC8280XP_SLAVE_UFS_CARD_CFG 572 +#define SC8280XP_SLAVE_UFS_MEM_CFG 573 +#define SC8280XP_SLAVE_USB3_0 574 +#define SC8280XP_SLAVE_USB3_1 575 +#define SC8280XP_SLAVE_USB3_MP 576 +#define SC8280XP_SLAVE_USB4_0 577 +#define SC8280XP_SLAVE_USB4_1 578 +#define SC8280XP_SLAVE_VENUS_CFG 579 +#define SC8280XP_SLAVE_VSENSE_CTRL_CFG 580 +#define SC8280XP_SLAVE_VSENSE_CTRL_R_CFG 581 +#define SC8280XP_SLAVE_A1NOC_CFG 582 +#define SC8280XP_SLAVE_A1NOC_SNOC 583 +#define SC8280XP_SLAVE_A2NOC_CFG 584 +#define SC8280XP_SLAVE_A2NOC_SNOC 585 +#define SC8280XP_SLAVE_USB_NOC_SNOC 586 +#define SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG 587 +#define SC8280XP_SLAVE_DDRSS_CFG 588 +#define SC8280XP_SLAVE_GEM_NOC_CNOC 589 +#define SC8280XP_SLAVE_GEM_NOC_CFG 590 +#define SC8280XP_SLAVE_SNOC_GEM_NOC_GC 591 +#define SC8280XP_SLAVE_SNOC_GEM_NOC_SF 592 +#define SC8280XP_SLAVE_LLCC 593 +#define SC8280XP_SLAVE_MNOC_HF_MEM_NOC 594 +#define SC8280XP_SLAVE_MNOC_SF_MEM_NOC 595 +#define SC8280XP_SLAVE_CNOC_MNOC_CFG 596 +#define SC8280XP_SLAVE_CDSP_MEM_NOC 597 +#define SC8280XP_SLAVE_CDSPB_MEM_NOC 598 
+#define SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC 599 +#define SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC 600 +#define SC8280XP_SLAVE_SNOC_CFG 601 +#define SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG 602 +#define SC8280XP_SLAVE_LPASS_SNOC 603 +#define SC8280XP_SLAVE_QUP_CORE_0 604 +#define SC8280XP_SLAVE_QUP_CORE_1 605 +#define SC8280XP_SLAVE_QUP_CORE_2 606 +#define SC8280XP_SLAVE_IMEM 607 +#define SC8280XP_SLAVE_NSP_XFR 608 +#define SC8280XP_SLAVE_NSPB_XFR 609 +#define SC8280XP_SLAVE_PIMEM 610 +#define SC8280XP_SLAVE_SERVICE_NSP_NOC 611 +#define SC8280XP_SLAVE_SERVICE_NSPB_NOC 612 +#define SC8280XP_SLAVE_SERVICE_A1NOC 613 +#define SC8280XP_SLAVE_SERVICE_A2NOC 614 +#define SC8280XP_SLAVE_SERVICE_CNOC 615 +#define SC8280XP_SLAVE_SERVICE_GEM_NOC_1 616 +#define SC8280XP_SLAVE_SERVICE_MNOC 617 +#define SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC 618 +#define SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC 619 +#define SC8280XP_SLAVE_SERVICE_GEM_NOC_2 620 +#define SC8280XP_SLAVE_SERVICE_SNOC 621 +#define SC8280XP_SLAVE_SERVICE_GEM_NOC 622 +#define SC8280XP_SLAVE_PCIE_0 623 +#define SC8280XP_SLAVE_PCIE_1 624 +#define SC8280XP_SLAVE_PCIE_2A 625 +#define SC8280XP_SLAVE_PCIE_2B 626 +#define SC8280XP_SLAVE_PCIE_3A 627 +#define SC8280XP_SLAVE_PCIE_3B 628 +#define SC8280XP_SLAVE_PCIE_4 629 +#define SC8280XP_SLAVE_QDSS_STM 630 +#define SC8280XP_SLAVE_SMSS 631 +#define SC8280XP_SLAVE_TCU 632 + +#endif + diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c index 274a7139fe1a..8d879b0bcabc 100644 --- a/drivers/interconnect/qcom/sdm660.c +++ b/drivers/interconnect/qcom/sdm660.c @@ -1490,7 +1490,7 @@ static struct qcom_icc_node slv_srvc_snoc = { .slv_rpm_id = 29, }; -static struct qcom_icc_node *sdm660_a2noc_nodes[] = { +static struct qcom_icc_node * const sdm660_a2noc_nodes[] = { [MASTER_IPA] = &mas_ipa, [MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc, [MASTER_SDCC_1] = &mas_sdcc_1, @@ -1512,7 +1512,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc sdm660_a2noc = { +static const struct qcom_icc_desc sdm660_a2noc = { .type = QCOM_ICC_NOC, .nodes = sdm660_a2noc_nodes, .num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes), @@ -1521,7 +1521,7 @@ static struct qcom_icc_desc sdm660_a2noc = { .regmap_cfg = &sdm660_a2noc_regmap_config, }; -static struct qcom_icc_node *sdm660_bimc_nodes[] = { +static struct qcom_icc_node * const sdm660_bimc_nodes[] = { [MASTER_GNOC_BIMC] = &mas_gnoc_bimc, [MASTER_OXILI] = &mas_oxili, [MASTER_MNOC_BIMC] = &mas_mnoc_bimc, @@ -1540,14 +1540,14 @@ static const struct regmap_config sdm660_bimc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc sdm660_bimc = { +static const struct qcom_icc_desc sdm660_bimc = { .type = QCOM_ICC_BIMC, .nodes = sdm660_bimc_nodes, .num_nodes = ARRAY_SIZE(sdm660_bimc_nodes), .regmap_cfg = &sdm660_bimc_regmap_config, }; -static struct qcom_icc_node *sdm660_cnoc_nodes[] = { +static struct qcom_icc_node * const sdm660_cnoc_nodes[] = { [MASTER_SNOC_CNOC] = &mas_snoc_cnoc, [MASTER_QDSS_DAP] = &mas_qdss_dap, [SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc, @@ -1594,14 +1594,14 @@ static const struct regmap_config sdm660_cnoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc sdm660_cnoc = { +static const struct qcom_icc_desc sdm660_cnoc = { .type = QCOM_ICC_NOC, .nodes = sdm660_cnoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes), .regmap_cfg = &sdm660_cnoc_regmap_config, }; -static struct qcom_icc_node *sdm660_gnoc_nodes[] = { +static struct qcom_icc_node * const sdm660_gnoc_nodes[] = { [MASTER_APSS_PROC] = 
&mas_apss_proc, [SLAVE_GNOC_BIMC] = &slv_gnoc_bimc, [SLAVE_GNOC_SNOC] = &slv_gnoc_snoc, @@ -1615,14 +1615,14 @@ static const struct regmap_config sdm660_gnoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc sdm660_gnoc = { +static const struct qcom_icc_desc sdm660_gnoc = { .type = QCOM_ICC_NOC, .nodes = sdm660_gnoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes), .regmap_cfg = &sdm660_gnoc_regmap_config, }; -static struct qcom_icc_node *sdm660_mnoc_nodes[] = { +static struct qcom_icc_node * const sdm660_mnoc_nodes[] = { [MASTER_CPP] = &mas_cpp, [MASTER_JPEG] = &mas_jpeg, [MASTER_MDP_P0] = &mas_mdp_p0, @@ -1655,7 +1655,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc sdm660_mnoc = { +static const struct qcom_icc_desc sdm660_mnoc = { .type = QCOM_ICC_NOC, .nodes = sdm660_mnoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes), @@ -1664,7 +1664,7 @@ static struct qcom_icc_desc sdm660_mnoc = { .regmap_cfg = &sdm660_mnoc_regmap_config, }; -static struct qcom_icc_node *sdm660_snoc_nodes[] = { +static struct qcom_icc_node * const sdm660_snoc_nodes[] = { [MASTER_QDSS_ETR] = &mas_qdss_etr, [MASTER_QDSS_BAM] = &mas_qdss_bam, [MASTER_SNOC_CFG] = &mas_snoc_cfg, @@ -1692,7 +1692,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = { .fast_io = true, }; -static struct qcom_icc_desc sdm660_snoc = { +static const struct qcom_icc_desc sdm660_snoc = { .type = QCOM_ICC_NOC, .nodes = sdm660_snoc_nodes, .num_nodes = ARRAY_SIZE(sdm660_snoc_nodes), diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index d2195079c228..954e7bd13fc4 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -175,12 +175,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic); DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc); DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc); -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_sn9, &bcm_qup0, }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg, [MASTER_TSIF] = &qhm_tsif, [MASTER_SDCC_2] = &xm_sdc2, @@ -201,13 +201,13 @@ static const struct qcom_icc_desc sdm845_aggre1_noc = { .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, &bcm_sn11, &bcm_qup0, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg, [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_CNOC_A2NOC] = &qnm_cnoc, @@ -230,11 +230,11 @@ static const struct qcom_icc_desc sdm845_aggre2_noc = { .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [MASTER_SPDM] = &qhm_spdm, [MASTER_TIC] = &qhm_tic, [MASTER_SNOC_CNOC] = &qnm_snoc, @@ -291,10 +291,10 @@ static const struct qcom_icc_desc sdm845_config_noc = { .num_bcms = ARRAY_SIZE(config_noc_bcms), }; -static struct qcom_icc_bcm *dc_noc_bcms[] = { +static struct qcom_icc_bcm * const dc_noc_bcms[] = { }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { 
[MASTER_CNOC_DC_NOC] = &qhm_cnoc, [SLAVE_LLCC_CFG] = &qhs_llcc, [SLAVE_MEM_NOC_CFG] = &qhs_memnoc, @@ -307,10 +307,10 @@ static const struct qcom_icc_desc sdm845_dc_noc = { .num_bcms = ARRAY_SIZE(dc_noc_bcms), }; -static struct qcom_icc_bcm *gladiator_noc_bcms[] = { +static struct qcom_icc_bcm * const gladiator_noc_bcms[] = { }; -static struct qcom_icc_node *gladiator_noc_nodes[] = { +static struct qcom_icc_node * const gladiator_noc_nodes[] = { [MASTER_APPSS_PROC] = &acm_l3, [MASTER_GNOC_CFG] = &pm_gnoc_cfg, [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv, @@ -325,7 +325,7 @@ static const struct qcom_icc_desc sdm845_gladiator_noc = { .num_bcms = ARRAY_SIZE(gladiator_noc_bcms), }; -static struct qcom_icc_bcm *mem_noc_bcms[] = { +static struct qcom_icc_bcm * const mem_noc_bcms[] = { &bcm_mc0, &bcm_acv, &bcm_sh0, @@ -335,7 +335,7 @@ static struct qcom_icc_bcm *mem_noc_bcms[] = { &bcm_sh5, }; -static struct qcom_icc_node *mem_noc_nodes[] = { +static struct qcom_icc_node * const mem_noc_nodes[] = { [MASTER_TCU_0] = &acm_tcu, [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg, [MASTER_GNOC_MEM_NOC] = &qnm_apps, @@ -360,14 +360,14 @@ static const struct qcom_icc_desc sdm845_mem_noc = { .num_bcms = ARRAY_SIZE(mem_noc_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm2, &bcm_mm3, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg, [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0, [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1, @@ -394,7 +394,7 @@ static const struct qcom_icc_desc sdm845_mmss_noc = { .num_bcms = ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn1, &bcm_sn2, @@ -411,7 +411,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn15, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_SNOC_CFG] = &qhm_snoc_cfg, [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c index e3ac25a997b7..130a828c3873 100644 --- a/drivers/interconnect/qcom/sdx55.c +++ b/drivers/interconnect/qcom/sdx55.c @@ -99,11 +99,11 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc); DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie); DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv); -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_mc0, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI_CH0] = &ebi, }; @@ -115,13 +115,13 @@ static const struct qcom_icc_desc sdx55_mc_virt = { .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mem_noc_bcms[] = { +static struct qcom_icc_bcm * const mem_noc_bcms[] = { &bcm_sh0, &bcm_sh3, &bcm_sh4, }; -static struct qcom_icc_node *mem_noc_nodes[] = { +static struct qcom_icc_node * const mem_noc_nodes[] = { [MASTER_TCU_0] = &acm_tcu, [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, [MASTER_AMPSS_M0] = &xm_apps_rdwr, @@ -137,7 +137,7 @@ static const struct qcom_icc_desc sdx55_mem_noc = { .num_bcms = ARRAY_SIZE(mem_noc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_ce0, &bcm_pn0, &bcm_pn1, @@ -156,7 
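The change repeated throughout these interconnect hunks is one constification pattern: the per-NoC tables hold pointers to node and BCM objects that the driver still writes at runtime, so only the arrays of pointers (and the descriptors referencing them) gain const, which lets the compiler place the tables in read-only data. A minimal sketch of the before/after shape, using an invented stand-in type rather than the real struct:

	/* Stand-in for struct qcom_icc_node; real fields elided. */
	struct example_node { int id; };
	static struct example_node node_a;	/* node stays writable at runtime */

	/* Before: both the array and the pointers it holds were writable. */
	static struct example_node *nodes_rw[] = { &node_a };

	/* After: the pointer array is read-only and can live in .rodata;
	 * the pointed-to nodes remain mutable, as the provider code requires. */
	static struct example_node * const nodes_ro[] = { &node_a };

The same reasoning explains the probe-side change further down in sm8450.c: a pointer used to walk such an array must become struct qcom_icc_node * const *qnodes, that is, a walkable pointer to const pointers to mutable nodes.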
+156,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn11, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_AUDIO] = &qhm_audio, [MASTER_BLSP_1] = &qhm_blsp1, [MASTER_QDSS_BAM] = &qhm_qdss_bam, diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c new file mode 100644 index 000000000000..b16d31d53e9b --- /dev/null +++ b/drivers/interconnect/qcom/sdx65.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <dt-bindings/interconnect/qcom,sdx65.h> + +#include "bcm-voter.h" +#include "icc-rpmh.h" +#include "sdx65.h" + +DEFINE_QNODE(llcc_mc, SDX65_MASTER_LLCC, 1, 4, SDX65_SLAVE_EBI1); +DEFINE_QNODE(acm_tcu, SDX65_MASTER_TCU_0, 1, 8, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC); +DEFINE_QNODE(qnm_snoc_gc, SDX65_MASTER_SNOC_GC_MEM_NOC, 1, 16, SDX65_SLAVE_LLCC); +DEFINE_QNODE(xm_apps_rdwr, SDX65_MASTER_APPSS_PROC, 1, 16, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC); +DEFINE_QNODE(qhm_audio, SDX65_MASTER_AUDIO, 1, 4, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(qhm_blsp1, SDX65_MASTER_BLSP_1, 1, 4, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(qhm_qdss_bam, SDX65_MASTER_QDSS_BAM, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU); +DEFINE_QNODE(qhm_qpic, SDX65_MASTER_QPIC, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(qhm_snoc_cfg, SDX65_MASTER_SNOC_CFG, 1, 4, SDX65_SLAVE_SERVICE_SNOC); +DEFINE_QNODE(qhm_spmi_fetcher1, SDX65_MASTER_SPMI_FETCHER, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(qnm_aggre_noc, SDX65_MASTER_ANOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU); +DEFINE_QNODE(qnm_ipa, SDX65_MASTER_IPA, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, 
SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM); +DEFINE_QNODE(qnm_memnoc, SDX65_MASTER_MEM_NOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_IMEM, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU); +DEFINE_QNODE(qnm_memnoc_pcie, SDX65_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_SLAVE_PCIE_0); +DEFINE_QNODE(qxm_crypto, SDX65_MASTER_CRYPTO, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(xm_ipa2pcie_slv, SDX65_MASTER_IPA_PCIE, 1, 8, SDX65_SLAVE_PCIE_0); +DEFINE_QNODE(xm_pcie, SDX65_MASTER_PCIE_0, 1, 8, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(xm_qdss_etr, SDX65_MASTER_QDSS_ETR, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU); +DEFINE_QNODE(xm_sdc1, SDX65_MASTER_SDCC_1, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(xm_usb3, SDX65_MASTER_USB3, 1, 8, SDX65_SLAVE_ANOC_SNOC); +DEFINE_QNODE(ebi, SDX65_SLAVE_EBI1, 1, 4); +DEFINE_QNODE(qns_llcc, SDX65_SLAVE_LLCC, 1, 16, SDX65_MASTER_LLCC); +DEFINE_QNODE(qns_memnoc_snoc, SDX65_SLAVE_MEM_NOC_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_SNOC); +DEFINE_QNODE(qns_sys_pcie, SDX65_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_PCIE_SNOC); +DEFINE_QNODE(qhs_aoss, SDX65_SLAVE_AOSS, 1, 4); +DEFINE_QNODE(qhs_apss, SDX65_SLAVE_APPSS, 1, 4); +DEFINE_QNODE(qhs_audio, SDX65_SLAVE_AUDIO, 1, 4); +DEFINE_QNODE(qhs_blsp1, SDX65_SLAVE_BLSP_1, 1, 4); +DEFINE_QNODE(qhs_clk_ctl, SDX65_SLAVE_CLK_CTL, 1, 4); +DEFINE_QNODE(qhs_crypto0_cfg, SDX65_SLAVE_CRYPTO_0_CFG, 1, 4); +DEFINE_QNODE(qhs_ddrss_cfg, SDX65_SLAVE_CNOC_DDRSS, 1, 4); +DEFINE_QNODE(qhs_ecc_cfg, SDX65_SLAVE_ECC_CFG, 1, 4); +DEFINE_QNODE(qhs_imem_cfg, SDX65_SLAVE_IMEM_CFG, 1, 4); +DEFINE_QNODE(qhs_ipa, SDX65_SLAVE_IPA_CFG, 1, 4); +DEFINE_QNODE(qhs_mss_cfg, SDX65_SLAVE_CNOC_MSS, 1, 4); +DEFINE_QNODE(qhs_pcie_parf, SDX65_SLAVE_PCIE_PARF, 1, 4); +DEFINE_QNODE(qhs_pdm, SDX65_SLAVE_PDM, 1, 4); +DEFINE_QNODE(qhs_prng, SDX65_SLAVE_PRNG, 1, 4); +DEFINE_QNODE(qhs_qdss_cfg, SDX65_SLAVE_QDSS_CFG, 1, 4); +DEFINE_QNODE(qhs_qpic, SDX65_SLAVE_QPIC, 1, 4); +DEFINE_QNODE(qhs_sdc1, SDX65_SLAVE_SDCC_1, 1, 4); +DEFINE_QNODE(qhs_snoc_cfg, SDX65_SLAVE_SNOC_CFG, 1, 4, SDX65_MASTER_SNOC_CFG); +DEFINE_QNODE(qhs_spmi_fetcher, SDX65_SLAVE_SPMI_FETCHER, 1, 4); +DEFINE_QNODE(qhs_spmi_vgi_coex, SDX65_SLAVE_SPMI_VGI_COEX, 1, 4); +DEFINE_QNODE(qhs_tcsr, SDX65_SLAVE_TCSR, 1, 4); +DEFINE_QNODE(qhs_tlmm, SDX65_SLAVE_TLMM, 1, 4); +DEFINE_QNODE(qhs_usb3, SDX65_SLAVE_USB3, 1, 4); +DEFINE_QNODE(qhs_usb3_phy, SDX65_SLAVE_USB3_PHY_CFG, 1, 4); +DEFINE_QNODE(qns_aggre_noc, 
SDX65_SLAVE_ANOC_SNOC, 1, 8, SDX65_MASTER_ANOC_SNOC); +DEFINE_QNODE(qns_snoc_memnoc, SDX65_SLAVE_SNOC_MEM_NOC_GC, 1, 16, SDX65_MASTER_SNOC_GC_MEM_NOC); +DEFINE_QNODE(qxs_imem, SDX65_SLAVE_IMEM, 1, 8); +DEFINE_QNODE(srvc_snoc, SDX65_SLAVE_SERVICE_SNOC, 1, 4); +DEFINE_QNODE(xs_pcie, SDX65_SLAVE_PCIE_0, 1, 8); +DEFINE_QNODE(xs_qdss_stm, SDX65_SLAVE_QDSS_STM, 1, 4); +DEFINE_QNODE(xs_sys_tcu_cfg, SDX65_SLAVE_TCU, 1, 8); + +DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto); +DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi); +DEFINE_QBCM(bcm_pn0, "PN0", true, &qhm_snoc_cfg, &qhs_aoss, &qhs_apss, &qhs_audio, &qhs_blsp1, &qhs_clk_ctl, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_ecc_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mss_cfg, &qhs_pcie_parf, &qhs_pdm, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_sdc1, &qhs_snoc_cfg, &qhs_spmi_fetcher, &qhs_spmi_vgi_coex, &qhs_tcsr, &qhs_tlmm, &qhs_usb3, &qhs_usb3_phy, &srvc_snoc); +DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1); +DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1); +DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic); +DEFINE_QBCM(bcm_pn4, "PN4", false, &qxm_crypto); +DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc); +DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_memnoc_snoc); +DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr); +DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc); +DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem); +DEFINE_QBCM(bcm_sn2, "SN2", false, &xs_qdss_stm); +DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_sys_tcu_cfg); +DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie); +DEFINE_QBCM(bcm_sn6, "SN6", false, &qhm_qdss_bam, &xm_qdss_etr); +DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc); +DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_memnoc); +DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc_pcie); +DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_ipa, &xm_ipa2pcie_slv); + +static struct qcom_icc_bcm * const mc_virt_bcms[] = { + &bcm_mc0, +}; + +static struct qcom_icc_node * const mc_virt_nodes[] = { + [MASTER_LLCC] = &llcc_mc, + [SLAVE_EBI1] = &ebi, +}; + +static const struct qcom_icc_desc sdx65_mc_virt = { + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), +}; + +static struct qcom_icc_bcm * const mem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh1, + &bcm_sh3, +}; + +static struct qcom_icc_node * const mem_noc_nodes[] = { + [MASTER_TCU_0] = &acm_tcu, + [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, + [MASTER_APPSS_PROC] = &xm_apps_rdwr, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc, + [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie, +}; + +static const struct qcom_icc_desc sdx65_mem_noc = { + .nodes = mem_noc_nodes, + .num_nodes = ARRAY_SIZE(mem_noc_nodes), + .bcms = mem_noc_bcms, + .num_bcms = ARRAY_SIZE(mem_noc_bcms), +}; + +static struct qcom_icc_bcm * const system_noc_bcms[] = { + &bcm_ce0, + &bcm_pn0, + &bcm_pn1, + &bcm_pn2, + &bcm_pn3, + &bcm_pn4, + &bcm_sn0, + &bcm_sn1, + &bcm_sn2, + &bcm_sn3, + &bcm_sn5, + &bcm_sn6, + &bcm_sn7, + &bcm_sn8, + &bcm_sn9, + &bcm_sn10, +}; + +static struct qcom_icc_node * const system_noc_nodes[] = { + [MASTER_AUDIO] = &qhm_audio, + [MASTER_BLSP_1] = &qhm_blsp1, + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_QPIC] = &qhm_qpic, + [MASTER_SNOC_CFG] = &qhm_snoc_cfg, + [MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1, + [MASTER_ANOC_SNOC] = &qnm_aggre_noc, + [MASTER_IPA] = &qnm_ipa, + [MASTER_MEM_NOC_SNOC] = &qnm_memnoc, + [MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_IPA_PCIE] 
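For readers unfamiliar with the helpers used above: DEFINE_QNODE() and DEFINE_QBCM() are declaration macros from drivers/interconnect/qcom/icc-rpmh.h. Read positionally, DEFINE_QNODE(name, id, channels, buswidth, links...) declares a static struct qcom_icc_node whose .name is the stringified identifier and whose trailing arguments become the node's outbound link IDs; DEFINE_QBCM(name, "VOTER", keepalive, nodes...) similarly ties a Bus Clock Manager vote to the listed nodes. An approximate expansion of DEFINE_QNODE, reconstructed from how the macro is used here rather than quoted from the header, so details may differ by kernel version:

	#define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...)		\
		static struct qcom_icc_node _name = {				\
			.name = #_name,						\
			.id = _id,						\
			.channels = _channels,					\
			.buswidth = _buswidth,					\
			.num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })),	\
			.links = { __VA_ARGS__ },				\
		}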
= &xm_ipa2pcie_slv, + [MASTER_PCIE_0] = &xm_pcie, + [MASTER_QDSS_ETR] = &xm_qdss_etr, + [MASTER_SDCC_1] = &xm_sdc1, + [MASTER_USB3] = &xm_usb3, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_APPSS] = &qhs_apss, + [SLAVE_AUDIO] = &qhs_audio, + [SLAVE_BLSP_1] = &qhs_blsp1, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg, + [SLAVE_ECC_CFG] = &qhs_ecc_cfg, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPA_CFG] = &qhs_ipa, + [SLAVE_CNOC_MSS] = &qhs_mss_cfg, + [SLAVE_PCIE_PARF] = &qhs_pcie_parf, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PRNG] = &qhs_prng, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QPIC] = &qhs_qpic, + [SLAVE_SDCC_1] = &qhs_sdc1, + [SLAVE_SNOC_CFG] = &qhs_snoc_cfg, + [SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher, + [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_USB3] = &qhs_usb3, + [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy, + [SLAVE_ANOC_SNOC] = &qns_aggre_noc, + [SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_SERVICE_SNOC] = &srvc_snoc, + [SLAVE_PCIE_0] = &xs_pcie, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_TCU] = &xs_sys_tcu_cfg, +}; + +static const struct qcom_icc_desc sdx65_system_noc = { + .nodes = system_noc_nodes, + .num_nodes = ARRAY_SIZE(system_noc_nodes), + .bcms = system_noc_bcms, + .num_bcms = ARRAY_SIZE(system_noc_bcms), +}; + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,sdx65-mc-virt", + .data = &sdx65_mc_virt}, + { .compatible = "qcom,sdx65-mem-noc", + .data = &sdx65_mem_noc}, + { .compatible = "qcom,sdx65-system-noc", + .data = &sdx65_system_noc}, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, + .driver = { + .name = "qnoc-sdx65", + .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + }, +}; +module_platform_driver(qnoc_driver); + +MODULE_DESCRIPTION("Qualcomm SDX65 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/sdx65.h b/drivers/interconnect/qcom/sdx65.h new file mode 100644 index 000000000000..5dca6e8b32c9 --- /dev/null +++ b/drivers/interconnect/qcom/sdx65.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
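With that, sdx65.c is complete: it registers three NoC providers (mc_virt, mem_noc, system_noc) through the common qcom_icc_rpmh_probe() and opts into icc_sync_state so that provisional boot-time bandwidth votes can be relaxed once all consumers have probed. A consumer on this SoC would then use the generic interconnect API; a minimal, illustrative sketch only, where "ddr" is a made-up interconnect-names entry (real consumers take path names from their DT bindings):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/interconnect.h>

	/* Illustrative consumer: raise a DDR path before a transfer. */
	static int example_scale_up(struct device *dev)
	{
		struct icc_path *path;
		int ret;

		path = of_icc_get(dev, "ddr");
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* Bandwidth arguments are in kBps: 100 MB/s avg, 200 MB/s peak. */
		ret = icc_set_bw(path, 100000, 200000);
		icc_put(path);
		return ret;
	}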
+ */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX65_H +#define __DRIVERS_INTERCONNECT_QCOM_SDX65_H + +#define SDX65_MASTER_TCU_0 0 +#define SDX65_MASTER_LLCC 1 +#define SDX65_MASTER_AUDIO 2 +#define SDX65_MASTER_BLSP_1 3 +#define SDX65_MASTER_QDSS_BAM 4 +#define SDX65_MASTER_QPIC 5 +#define SDX65_MASTER_SNOC_CFG 6 +#define SDX65_MASTER_SPMI_FETCHER 7 +#define SDX65_MASTER_ANOC_SNOC 8 +#define SDX65_MASTER_IPA 9 +#define SDX65_MASTER_MEM_NOC_SNOC 10 +#define SDX65_MASTER_MEM_NOC_PCIE_SNOC 11 +#define SDX65_MASTER_SNOC_GC_MEM_NOC 12 +#define SDX65_MASTER_CRYPTO 13 +#define SDX65_MASTER_APPSS_PROC 14 +#define SDX65_MASTER_IPA_PCIE 15 +#define SDX65_MASTER_PCIE_0 16 +#define SDX65_MASTER_QDSS_ETR 17 +#define SDX65_MASTER_SDCC_1 18 +#define SDX65_MASTER_USB3 19 +#define SDX65_SLAVE_EBI1 512 +#define SDX65_SLAVE_AOSS 513 +#define SDX65_SLAVE_APPSS 514 +#define SDX65_SLAVE_AUDIO 515 +#define SDX65_SLAVE_BLSP_1 516 +#define SDX65_SLAVE_CLK_CTL 517 +#define SDX65_SLAVE_CRYPTO_0_CFG 518 +#define SDX65_SLAVE_CNOC_DDRSS 519 +#define SDX65_SLAVE_ECC_CFG 520 +#define SDX65_SLAVE_IMEM_CFG 521 +#define SDX65_SLAVE_IPA_CFG 522 +#define SDX65_SLAVE_CNOC_MSS 523 +#define SDX65_SLAVE_PCIE_PARF 524 +#define SDX65_SLAVE_PDM 525 +#define SDX65_SLAVE_PRNG 526 +#define SDX65_SLAVE_QDSS_CFG 527 +#define SDX65_SLAVE_QPIC 528 +#define SDX65_SLAVE_SDCC_1 529 +#define SDX65_SLAVE_SNOC_CFG 530 +#define SDX65_SLAVE_SPMI_FETCHER 531 +#define SDX65_SLAVE_SPMI_VGI_COEX 532 +#define SDX65_SLAVE_TCSR 533 +#define SDX65_SLAVE_TLMM 534 +#define SDX65_SLAVE_USB3 535 +#define SDX65_SLAVE_USB3_PHY_CFG 536 +#define SDX65_SLAVE_ANOC_SNOC 537 +#define SDX65_SLAVE_LLCC 538 +#define SDX65_SLAVE_MEM_NOC_SNOC 539 +#define SDX65_SLAVE_SNOC_MEM_NOC_GC 540 +#define SDX65_SLAVE_MEM_NOC_PCIE_SNOC 541 +#define SDX65_SLAVE_IMEM 542 +#define SDX65_SLAVE_SERVICE_SNOC 543 +#define SDX65_SLAVE_PCIE_0 544 +#define SDX65_SLAVE_QDSS_STM 545 +#define SDX65_SLAVE_TCU 546 + +#endif diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index 745e3c36a61a..1d04a4bfea80 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -186,12 +186,12 @@ DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic); DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc); DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc); -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_qup0, &bcm_sn3, }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg, [MASTER_QUP_0] = &qhm_qup0, [MASTER_EMAC] = &xm_emac, @@ -202,21 +202,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, }; -static struct qcom_icc_desc sm8150_aggre1_noc = { +static const struct qcom_icc_desc sm8150_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, &bcm_qup0, &bcm_sn14, &bcm_sn3, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg, [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_QSPI] = &qhm_qspi, @@ -237,53 +237,53 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -static struct 
qcom_icc_desc sm8150_aggre2_noc = { +static const struct qcom_icc_desc sm8150_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *camnoc_virt_bcms[] = { +static struct qcom_icc_bcm * const camnoc_virt_bcms[] = { &bcm_mm1, }; -static struct qcom_icc_node *camnoc_virt_nodes[] = { +static struct qcom_icc_node * const camnoc_virt_nodes[] = { [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp, [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp, [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp, [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp, }; -static struct qcom_icc_desc sm8150_camnoc_virt = { +static const struct qcom_icc_desc sm8150_camnoc_virt = { .nodes = camnoc_virt_nodes, .num_nodes = ARRAY_SIZE(camnoc_virt_nodes), .bcms = camnoc_virt_bcms, .num_bcms = ARRAY_SIZE(camnoc_virt_bcms), }; -static struct qcom_icc_bcm *compute_noc_bcms[] = { +static struct qcom_icc_bcm * const compute_noc_bcms[] = { &bcm_co0, &bcm_co1, }; -static struct qcom_icc_node *compute_noc_nodes[] = { +static struct qcom_icc_node * const compute_noc_nodes[] = { [MASTER_NPU] = &qnm_npu, [SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc, }; -static struct qcom_icc_desc sm8150_compute_noc = { +static const struct qcom_icc_desc sm8150_compute_noc = { .nodes = compute_noc_nodes, .num_nodes = ARRAY_SIZE(compute_noc_nodes), .bcms = compute_noc_bcms, .num_bcms = ARRAY_SIZE(compute_noc_bcms), }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [MASTER_SPDM] = &qhm_spdm, [SNOC_CNOC_MAS] = &qnm_snoc, [MASTER_QDSS_DAP] = &xm_qdss_dap, @@ -340,30 +340,30 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_SERVICE_CNOC] = &srvc_cnoc, }; -static struct qcom_icc_desc sm8150_config_noc = { +static const struct qcom_icc_desc sm8150_config_noc = { .nodes = config_noc_nodes, .num_nodes = ARRAY_SIZE(config_noc_nodes), .bcms = config_noc_bcms, .num_bcms = ARRAY_SIZE(config_noc_bcms), }; -static struct qcom_icc_bcm *dc_noc_bcms[] = { +static struct qcom_icc_bcm * const dc_noc_bcms[] = { }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc, [SLAVE_LLCC_CFG] = &qhs_llcc, [SLAVE_GEM_NOC_CFG] = &qhs_memnoc, }; -static struct qcom_icc_desc sm8150_dc_noc = { +static const struct qcom_icc_desc sm8150_dc_noc = { .nodes = dc_noc_nodes, .num_nodes = ARRAY_SIZE(dc_noc_nodes), .bcms = dc_noc_bcms, .num_bcms = ARRAY_SIZE(dc_noc_bcms), }; -static struct qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh2, &bcm_sh3, @@ -371,7 +371,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = { &bcm_sh5, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_AMPSS_M0] = &acm_apps, [MASTER_GPU_TCU] = &acm_gpu_tcu, [MASTER_SYS_TCU] = &acm_sys_tcu, @@ -391,54 +391,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc, }; -static struct qcom_icc_desc sm8150_gem_noc = { +static const struct qcom_icc_desc sm8150_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *ipa_virt_bcms[] = { +static struct 
qcom_icc_bcm * const ipa_virt_bcms[] = { &bcm_ip0, }; -static struct qcom_icc_node *ipa_virt_nodes[] = { +static struct qcom_icc_node * const ipa_virt_nodes[] = { [MASTER_IPA_CORE] = &ipa_core_master, [SLAVE_IPA_CORE] = &ipa_core_slave, }; -static struct qcom_icc_desc sm8150_ipa_virt = { +static const struct qcom_icc_desc sm8150_ipa_virt = { .nodes = ipa_virt_nodes, .num_nodes = ARRAY_SIZE(ipa_virt_nodes), .bcms = ipa_virt_bcms, .num_bcms = ARRAY_SIZE(ipa_virt_bcms), }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI_CH0] = &ebi, }; -static struct qcom_icc_desc sm8150_mc_virt = { +static const struct qcom_icc_desc sm8150_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm2, &bcm_mm3, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg, [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0, [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1, @@ -454,14 +454,14 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_SERVICE_MNOC] = &srvc_mnoc, }; -static struct qcom_icc_desc sm8150_mmss_noc = { +static const struct qcom_icc_desc sm8150_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, .num_bcms = ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn1, &bcm_sn11, @@ -475,7 +475,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn9, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_SNOC_CFG] = &qhm_snoc_cfg, [A1NOC_SNOC_MAS] = &qnm_aggre1_noc, [A2NOC_SNOC_MAS] = &qnm_aggre2_noc, @@ -495,7 +495,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -static struct qcom_icc_desc sm8150_system_noc = { +static const struct qcom_icc_desc sm8150_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index aa707582ea01..5cdb058fa095 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -195,12 +195,12 @@ DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_gemnoc_pcie); DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gemnoc); DEFINE_QBCM(bcm_sn12, "SN12", false, &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc); -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_qup0, &bcm_sn12, }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg, [MASTER_QSPI_0] = &qhm_qspi, [MASTER_QUP_1] = &qhm_qup1, @@ -216,20 +216,20 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, }; -static struct qcom_icc_desc sm8250_aggre1_noc = { +static const struct qcom_icc_desc sm8250_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, .num_bcms = 
ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, &bcm_qup0, &bcm_sn12, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg, [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_QUP_0] = &qhm_qup0, @@ -246,35 +246,35 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -static struct qcom_icc_desc sm8250_aggre2_noc = { +static const struct qcom_icc_desc sm8250_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *compute_noc_bcms[] = { +static struct qcom_icc_bcm * const compute_noc_bcms[] = { &bcm_co0, &bcm_co2, }; -static struct qcom_icc_node *compute_noc_nodes[] = { +static struct qcom_icc_node * const compute_noc_nodes[] = { [MASTER_NPU] = &qnm_npu, [SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc, }; -static struct qcom_icc_desc sm8250_compute_noc = { +static const struct qcom_icc_desc sm8250_compute_noc = { .nodes = compute_noc_nodes, .num_nodes = ARRAY_SIZE(compute_noc_nodes), .bcms = compute_noc_bcms, .num_bcms = ARRAY_SIZE(compute_noc_bcms), }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [SNOC_CNOC_MAS] = &qnm_snoc, [MASTER_QDSS_DAP] = &xm_qdss_dap, [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg, @@ -329,37 +329,37 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_SERVICE_CNOC] = &srvc_cnoc, }; -static struct qcom_icc_desc sm8250_config_noc = { +static const struct qcom_icc_desc sm8250_config_noc = { .nodes = config_noc_nodes, .num_nodes = ARRAY_SIZE(config_noc_nodes), .bcms = config_noc_bcms, .num_bcms = ARRAY_SIZE(config_noc_bcms), }; -static struct qcom_icc_bcm *dc_noc_bcms[] = { +static struct qcom_icc_bcm * const dc_noc_bcms[] = { }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc, [SLAVE_LLCC_CFG] = &qhs_llcc, [SLAVE_GEM_NOC_CFG] = &qhs_memnoc, }; -static struct qcom_icc_desc sm8250_dc_noc = { +static const struct qcom_icc_desc sm8250_dc_noc = { .nodes = dc_noc_nodes, .num_nodes = ARRAY_SIZE(dc_noc_nodes), .bcms = dc_noc_bcms, .num_bcms = ARRAY_SIZE(dc_noc_bcms), }; -static struct qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh2, &bcm_sh3, &bcm_sh4, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_GPU_TCU] = &alm_gpu_tcu, [MASTER_SYS_TCU] = &alm_sys_tcu, [MASTER_AMPSS_M0] = &chm_apps, @@ -379,54 +379,54 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc, }; -static struct qcom_icc_desc sm8250_gem_noc = { +static const struct qcom_icc_desc sm8250_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *ipa_virt_bcms[] = { +static struct qcom_icc_bcm * const ipa_virt_bcms[] = { &bcm_ip0, }; -static struct qcom_icc_node *ipa_virt_nodes[] = { +static struct qcom_icc_node * const ipa_virt_nodes[] = { [MASTER_IPA_CORE] = &ipa_core_master, [SLAVE_IPA_CORE] = 
&ipa_core_slave, }; -static struct qcom_icc_desc sm8250_ipa_virt = { +static const struct qcom_icc_desc sm8250_ipa_virt = { .nodes = ipa_virt_nodes, .num_nodes = ARRAY_SIZE(ipa_virt_nodes), .bcms = ipa_virt_bcms, .num_bcms = ARRAY_SIZE(ipa_virt_bcms), }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI_CH0] = &ebi, }; -static struct qcom_icc_desc sm8250_mc_virt = { +static const struct qcom_icc_desc sm8250_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm2, &bcm_mm3, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg, [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, @@ -442,17 +442,17 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_SERVICE_MNOC] = &srvc_mnoc, }; -static struct qcom_icc_desc sm8250_mmss_noc = { +static const struct qcom_icc_desc sm8250_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, .num_bcms = ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_bcm *npu_noc_bcms[] = { +static struct qcom_icc_bcm * const npu_noc_bcms[] = { }; -static struct qcom_icc_node *npu_noc_nodes[] = { +static struct qcom_icc_node * const npu_noc_nodes[] = { [MASTER_NPU_SYS] = &amm_npu_sys, [MASTER_NPU_CDP] = &amm_npu_sys_cdp_w, [MASTER_NPU_NOC_CFG] = &qhm_cfg, @@ -468,14 +468,14 @@ static struct qcom_icc_node *npu_noc_nodes[] = { [SLAVE_SERVICE_NPU_NOC] = &srvc_noc, }; -static struct qcom_icc_desc sm8250_npu_noc = { +static const struct qcom_icc_desc sm8250_npu_noc = { .nodes = npu_noc_nodes, .num_nodes = ARRAY_SIZE(npu_noc_nodes), .bcms = npu_noc_bcms, .num_bcms = ARRAY_SIZE(npu_noc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn1, &bcm_sn11, @@ -489,7 +489,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn9, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_SNOC_CFG] = &qhm_snoc_cfg, [A1NOC_SNOC_MAS] = &qnm_aggre1_noc, [A2NOC_SNOC_MAS] = &qnm_aggre2_noc, @@ -511,7 +511,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -static struct qcom_icc_desc sm8250_system_noc = { +static const struct qcom_icc_desc sm8250_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c index c79f93a1ac73..5398e7c8d826 100644 --- a/drivers/interconnect/qcom/sm8350.c +++ b/drivers/interconnect/qcom/sm8350.c @@ -198,10 +198,10 @@ DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp); DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp); DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp); -static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_QSPI_0] = 
&qhm_qspi, [MASTER_QUP_1] = &qhm_qup1, [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg, @@ -213,21 +213,21 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, }; -static struct qcom_icc_desc sm8350_aggre1_noc = { +static const struct qcom_icc_desc sm8350_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, &bcm_sn5, &bcm_sn6, &bcm_sn14, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_QUP_0] = &qhm_qup0, [MASTER_QUP_2] = &qhm_qup2, @@ -244,14 +244,14 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -static struct qcom_icc_desc sm8350_aggre2_noc = { +static const struct qcom_icc_desc sm8350_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, &bcm_cn1, &bcm_cn2, @@ -259,7 +259,7 @@ static struct qcom_icc_bcm *config_noc_bcms[] = { &bcm_sn4, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, [MASTER_QDSS_DAP] = &xm_qdss_dap, @@ -323,30 +323,30 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -static struct qcom_icc_desc sm8350_config_noc = { +static const struct qcom_icc_desc sm8350_config_noc = { .nodes = config_noc_nodes, .num_nodes = ARRAY_SIZE(config_noc_nodes), .bcms = config_noc_bcms, .num_bcms = ARRAY_SIZE(config_noc_bcms), }; -static struct qcom_icc_bcm *dc_noc_bcms[] = { +static struct qcom_icc_bcm * const dc_noc_bcms[] = { }; -static struct qcom_icc_node *dc_noc_nodes[] = { +static struct qcom_icc_node * const dc_noc_nodes[] = { [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc, [SLAVE_LLCC_CFG] = &qhs_llcc, [SLAVE_GEM_NOC_CFG] = &qns_gemnoc, }; -static struct qcom_icc_desc sm8350_dc_noc = { +static const struct qcom_icc_desc sm8350_dc_noc = { .nodes = dc_noc_nodes, .num_nodes = ARRAY_SIZE(dc_noc_nodes), .bcms = dc_noc_bcms, .num_bcms = ARRAY_SIZE(dc_noc_bcms), }; -static struct qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh2, &bcm_sh3, @@ -354,7 +354,7 @@ static struct qcom_icc_bcm *gem_noc_bcms[] = { &bcm_sh0_disp, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_GPU_TCU] = &alm_gpu_tcu, [MASTER_SYS_TCU] = &alm_sys_tcu, [MASTER_APPSS_PROC] = &chm_apps, @@ -379,17 +379,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_LLCC_DISP] = &qns_llcc_disp, }; -static struct qcom_icc_desc sm8350_gem_noc = { +static const struct qcom_icc_desc sm8350_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = { +static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = { }; -static struct qcom_icc_node *lpass_ag_noc_nodes[] = { +static struct qcom_icc_node * const lpass_ag_noc_nodes[] = { [MASTER_CNOC_LPASS_AG_NOC] = 
&qhm_config_noc, [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core, [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi, @@ -399,35 +399,35 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = { [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc, }; -static struct qcom_icc_desc sm8350_lpass_ag_noc = { +static const struct qcom_icc_desc sm8350_lpass_ag_noc = { .nodes = lpass_ag_noc_nodes, .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), .bcms = lpass_ag_noc_bcms, .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, &bcm_acv_disp, &bcm_mc0_disp, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI1] = &ebi, [MASTER_LLCC_DISP] = &llcc_mc_disp, [SLAVE_EBI1_DISP] = &ebi_disp, }; -static struct qcom_icc_desc sm8350_mc_virt = { +static const struct qcom_icc_desc sm8350_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm4, @@ -438,7 +438,7 @@ static struct qcom_icc_bcm *mmss_noc_bcms[] = { &bcm_mm5_disp, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, [MASTER_CAMNOC_SF] = &qnm_camnoc_sf, @@ -459,40 +459,40 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp, }; -static struct qcom_icc_desc sm8350_mmss_noc = { +static const struct qcom_icc_desc sm8350_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, .num_bcms = ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_bcm *nsp_noc_bcms[] = { +static struct qcom_icc_bcm * const nsp_noc_bcms[] = { &bcm_co0, &bcm_co3, }; -static struct qcom_icc_node *nsp_noc_nodes[] = { +static struct qcom_icc_node * const nsp_noc_nodes[] = { [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config, [MASTER_CDSP_PROC] = &qxm_nsp, [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc, }; -static struct qcom_icc_desc sm8350_compute_noc = { +static const struct qcom_icc_desc sm8350_compute_noc = { .nodes = nsp_noc_nodes, .num_nodes = ARRAY_SIZE(nsp_noc_nodes), .bcms = nsp_noc_bcms, .num_bcms = ARRAY_SIZE(nsp_noc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn2, &bcm_sn7, &bcm_sn8, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, [MASTER_SNOC_CFG] = &qnm_snoc_cfg, @@ -503,7 +503,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_SERVICE_SNOC] = &srvc_snoc, }; -static struct qcom_icc_desc sm8350_system_noc = { +static const struct qcom_icc_desc sm8350_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c index 8d99ee6421df..7e3d372b712f 100644 --- a/drivers/interconnect/qcom/sm8450.c +++ b/drivers/interconnect/qcom/sm8450.c @@ -1526,10 +1526,10 @@ static struct qcom_icc_bcm bcm_sh1_disp = { .nodes = { &qnm_pcie_disp }, }; -static struct 
qcom_icc_bcm *aggre1_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { }; -static struct qcom_icc_node *aggre1_noc_nodes[] = { +static struct qcom_icc_node * const aggre1_noc_nodes[] = { [MASTER_QSPI_0] = &qhm_qspi, [MASTER_QUP_1] = &qhm_qup1, [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg, @@ -1540,18 +1540,18 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = { [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc, }; -static struct qcom_icc_desc sm8450_aggre1_noc = { +static const struct qcom_icc_desc sm8450_aggre1_noc = { .nodes = aggre1_noc_nodes, .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), .bcms = aggre1_noc_bcms, .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), }; -static struct qcom_icc_bcm *aggre2_noc_bcms[] = { +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_ce0, }; -static struct qcom_icc_node *aggre2_noc_nodes[] = { +static struct qcom_icc_node * const aggre2_noc_nodes[] = { [MASTER_QDSS_BAM] = &qhm_qdss_bam, [MASTER_QUP_0] = &qhm_qup0, [MASTER_QUP_2] = &qhm_qup2, @@ -1567,20 +1567,20 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = { [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc, }; -static struct qcom_icc_desc sm8450_aggre2_noc = { +static const struct qcom_icc_desc sm8450_aggre2_noc = { .nodes = aggre2_noc_nodes, .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), .bcms = aggre2_noc_bcms, .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), }; -static struct qcom_icc_bcm *clk_virt_bcms[] = { +static struct qcom_icc_bcm * const clk_virt_bcms[] = { &bcm_qup0, &bcm_qup1, &bcm_qup2, }; -static struct qcom_icc_node *clk_virt_nodes[] = { +static struct qcom_icc_node * const clk_virt_nodes[] = { [MASTER_QUP_CORE_0] = &qup0_core_master, [MASTER_QUP_CORE_1] = &qup1_core_master, [MASTER_QUP_CORE_2] = &qup2_core_master, @@ -1589,18 +1589,18 @@ static struct qcom_icc_node *clk_virt_nodes[] = { [SLAVE_QUP_CORE_2] = &qup2_core_slave, }; -static struct qcom_icc_desc sm8450_clk_virt = { +static const struct qcom_icc_desc sm8450_clk_virt = { .nodes = clk_virt_nodes, .num_nodes = ARRAY_SIZE(clk_virt_nodes), .bcms = clk_virt_bcms, .num_bcms = ARRAY_SIZE(clk_virt_bcms), }; -static struct qcom_icc_bcm *config_noc_bcms[] = { +static struct qcom_icc_bcm * const config_noc_bcms[] = { &bcm_cn0, }; -static struct qcom_icc_node *config_noc_nodes[] = { +static struct qcom_icc_node * const config_noc_nodes[] = { [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0, @@ -1658,21 +1658,21 @@ static struct qcom_icc_node *config_noc_nodes[] = { [SLAVE_TCU] = &xs_sys_tcu_cfg, }; -static struct qcom_icc_desc sm8450_config_noc = { +static const struct qcom_icc_desc sm8450_config_noc = { .nodes = config_noc_nodes, .num_nodes = ARRAY_SIZE(config_noc_nodes), .bcms = config_noc_bcms, .num_bcms = ARRAY_SIZE(config_noc_bcms), }; -static struct qcom_icc_bcm *gem_noc_bcms[] = { +static struct qcom_icc_bcm * const gem_noc_bcms[] = { &bcm_sh0, &bcm_sh1, &bcm_sh0_disp, &bcm_sh1_disp, }; -static struct qcom_icc_node *gem_noc_nodes[] = { +static struct qcom_icc_node * const gem_noc_nodes[] = { [MASTER_GPU_TCU] = &alm_gpu_tcu, [MASTER_SYS_TCU] = &alm_sys_tcu, [MASTER_APPSS_PROC] = &chm_apps, @@ -1693,17 +1693,17 @@ static struct qcom_icc_node *gem_noc_nodes[] = { [SLAVE_LLCC_DISP] = &qns_llcc_disp, }; -static struct qcom_icc_desc sm8450_gem_noc = { +static const struct qcom_icc_desc sm8450_gem_noc = { .nodes = gem_noc_nodes, .num_nodes = ARRAY_SIZE(gem_noc_nodes), .bcms = gem_noc_bcms, .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm 
*lpass_ag_noc_bcms[] = { +static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = { }; -static struct qcom_icc_node *lpass_ag_noc_nodes[] = { +static struct qcom_icc_node * const lpass_ag_noc_nodes[] = { [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc, [MASTER_LPASS_PROC] = &qxm_lpass_dsp, [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core, @@ -1715,42 +1715,42 @@ static struct qcom_icc_node *lpass_ag_noc_nodes[] = { [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc, }; -static struct qcom_icc_desc sm8450_lpass_ag_noc = { +static const struct qcom_icc_desc sm8450_lpass_ag_noc = { .nodes = lpass_ag_noc_nodes, .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), .bcms = lpass_ag_noc_bcms, .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), }; -static struct qcom_icc_bcm *mc_virt_bcms[] = { +static struct qcom_icc_bcm * const mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, &bcm_acv_disp, &bcm_mc0_disp, }; -static struct qcom_icc_node *mc_virt_nodes[] = { +static struct qcom_icc_node * const mc_virt_nodes[] = { [MASTER_LLCC] = &llcc_mc, [SLAVE_EBI1] = &ebi, [MASTER_LLCC_DISP] = &llcc_mc_disp, [SLAVE_EBI1_DISP] = &ebi_disp, }; -static struct qcom_icc_desc sm8450_mc_virt = { +static const struct qcom_icc_desc sm8450_mc_virt = { .nodes = mc_virt_nodes, .num_nodes = ARRAY_SIZE(mc_virt_nodes), .bcms = mc_virt_bcms, .num_bcms = ARRAY_SIZE(mc_virt_bcms), }; -static struct qcom_icc_bcm *mmss_noc_bcms[] = { +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { &bcm_mm0, &bcm_mm1, &bcm_mm0_disp, &bcm_mm1_disp, }; -static struct qcom_icc_node *mmss_noc_nodes[] = { +static struct qcom_icc_node * const mmss_noc_nodes[] = { [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, [MASTER_CAMNOC_SF] = &qnm_camnoc_sf, @@ -1771,36 +1771,36 @@ static struct qcom_icc_node *mmss_noc_nodes[] = { [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp, }; -static struct qcom_icc_desc sm8450_mmss_noc = { +static const struct qcom_icc_desc sm8450_mmss_noc = { .nodes = mmss_noc_nodes, .num_nodes = ARRAY_SIZE(mmss_noc_nodes), .bcms = mmss_noc_bcms, .num_bcms = ARRAY_SIZE(mmss_noc_bcms), }; -static struct qcom_icc_bcm *nsp_noc_bcms[] = { +static struct qcom_icc_bcm * const nsp_noc_bcms[] = { &bcm_co0, }; -static struct qcom_icc_node *nsp_noc_nodes[] = { +static struct qcom_icc_node * const nsp_noc_nodes[] = { [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config, [MASTER_CDSP_PROC] = &qxm_nsp, [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc, }; -static struct qcom_icc_desc sm8450_nsp_noc = { +static const struct qcom_icc_desc sm8450_nsp_noc = { .nodes = nsp_noc_nodes, .num_nodes = ARRAY_SIZE(nsp_noc_nodes), .bcms = nsp_noc_bcms, .num_bcms = ARRAY_SIZE(nsp_noc_bcms), }; -static struct qcom_icc_bcm *pcie_anoc_bcms[] = { +static struct qcom_icc_bcm * const pcie_anoc_bcms[] = { &bcm_sn7, }; -static struct qcom_icc_node *pcie_anoc_nodes[] = { +static struct qcom_icc_node * const pcie_anoc_nodes[] = { [MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg, [MASTER_PCIE_0] = &xm_pcie3_0, [MASTER_PCIE_1] = &xm_pcie3_1, @@ -1808,14 +1808,14 @@ static struct qcom_icc_node *pcie_anoc_nodes[] = { [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc, }; -static struct qcom_icc_desc sm8450_pcie_anoc = { +static const struct qcom_icc_desc sm8450_pcie_anoc = { .nodes = pcie_anoc_nodes, .num_nodes = ARRAY_SIZE(pcie_anoc_nodes), .bcms = pcie_anoc_bcms, .num_bcms = ARRAY_SIZE(pcie_anoc_bcms), }; -static struct qcom_icc_bcm *system_noc_bcms[] = { +static struct qcom_icc_bcm * const system_noc_bcms[] = { &bcm_sn0, &bcm_sn1, &bcm_sn2, @@ -1823,7 
+1823,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = { &bcm_sn4, }; -static struct qcom_icc_node *system_noc_nodes[] = { +static struct qcom_icc_node * const system_noc_nodes[] = { [MASTER_GIC_AHB] = &qhm_gic, [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, @@ -1836,7 +1836,7 @@ static struct qcom_icc_node *system_noc_nodes[] = { [SLAVE_SERVICE_SNOC] = &srvc_snoc, }; -static struct qcom_icc_desc sm8450_system_noc = { +static const struct qcom_icc_desc sm8450_system_noc = { .nodes = system_noc_nodes, .num_nodes = ARRAY_SIZE(system_noc_nodes), .bcms = system_noc_bcms, @@ -1848,7 +1848,7 @@ static int qnoc_probe(struct platform_device *pdev) const struct qcom_icc_desc *desc; struct icc_onecell_data *data; struct icc_provider *provider; - struct qcom_icc_node **qnodes; + struct qcom_icc_node * const *qnodes; struct qcom_icc_provider *qp; struct icc_node *node; size_t num_nodes, i; diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 92c0611034b0..075f3a36d512 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -530,11 +530,8 @@ exit_done: } break; case OP_SWP: - if (altera_check_stack(stack_ptr, 2, &status)) { - long_tmp = stack[stack_ptr - 2]; - stack[stack_ptr - 2] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, 2, &status)) + swap(stack[stack_ptr - 2], stack[stack_ptr - 1]); break; case OP_ADD: if (altera_check_stack(stack_ptr, 2, &status)) { @@ -912,34 +909,22 @@ exit_done: */ /* SWP */ - if (altera_check_stack(stack_ptr, 2, &status)) { - long_tmp = stack[stack_ptr - 2]; - stack[stack_ptr - 2] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, 2, &status)) + swap(stack[stack_ptr - 2], stack[stack_ptr - 1]); /* SWPN 7 */ index = 7 + 1; - if (altera_check_stack(stack_ptr, index, &status)) { - long_tmp = stack[stack_ptr - index]; - stack[stack_ptr - index] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, index, &status)) + swap(stack[stack_ptr - index], stack[stack_ptr - 1]); /* SWP */ - if (altera_check_stack(stack_ptr, 2, &status)) { - long_tmp = stack[stack_ptr - 2]; - stack[stack_ptr - 2] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, 2, &status)) + swap(stack[stack_ptr - 2], stack[stack_ptr - 1]); /* SWPN 6 */ index = 6 + 1; - if (altera_check_stack(stack_ptr, index, &status)) { - long_tmp = stack[stack_ptr - index]; - stack[stack_ptr - index] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, index, &status)) + swap(stack[stack_ptr - index], stack[stack_ptr - 1]); /* DUPN 8 */ index = 8 + 1; @@ -950,18 +935,12 @@ exit_done: /* SWPN 2 */ index = 2 + 1; - if (altera_check_stack(stack_ptr, index, &status)) { - long_tmp = stack[stack_ptr - index]; - stack[stack_ptr - index] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, index, &status)) + swap(stack[stack_ptr - index], stack[stack_ptr - 1]); /* SWP */ - if (altera_check_stack(stack_ptr, 2, &status)) { - long_tmp = stack[stack_ptr - 2]; - stack[stack_ptr - 2] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, 2, &status)) + swap(stack[stack_ptr - 2], stack[stack_ptr - 1]); /* DUPN 6 */ index = 6 + 1; @@ -1075,11 +1054,8 @@ exit_done: * to swap with top element */ index = (args[0]) + 1; - 
if (altera_check_stack(stack_ptr, index, &status)) { - long_tmp = stack[stack_ptr - index]; - stack[stack_ptr - index] = stack[stack_ptr - 1]; - stack[stack_ptr - 1] = long_tmp; - } + if (altera_check_stack(stack_ptr, index, &status)) + swap(stack[stack_ptr - index], stack[stack_ptr - 1]); break; case OP_DUPN: /* diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c index 066b9ef7fcd7..3c081504f38c 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.c +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -757,20 +757,19 @@ static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk, u16 q_num, u16 msg_id) { - bool found = false; - struct bcm_vk_wkent *entry; + struct bcm_vk_wkent *entry = NULL, *iter; spin_lock(&chan->pendq_lock); - list_for_each_entry(entry, &chan->pendq[q_num], node) { - if (get_msg_id(&entry->to_v_msg[0]) == msg_id) { - list_del(&entry->node); - found = true; + list_for_each_entry(iter, &chan->pendq[q_num], node) { + if (get_msg_id(&iter->to_v_msg[0]) == msg_id) { + list_del(&iter->node); + entry = iter; bcm_vk_msgid_bitmap_clear(vk, msg_id, 1); break; } } spin_unlock(&chan->pendq_lock); - return ((found) ? entry : NULL); + return entry; } s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk) @@ -1010,16 +1009,14 @@ ssize_t bcm_vk_read(struct file *p_file, miscdev); struct device *dev = &vk->pdev->dev; struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan; - struct bcm_vk_wkent *entry = NULL; + struct bcm_vk_wkent *entry = NULL, *iter; u32 q_num; u32 rsp_length; - bool found = false; if (!bcm_vk_drv_access_ok(vk)) return -EPERM; dev_dbg(dev, "Buf count %zu\n", count); - found = false; /* * search through the pendq on the to_h chan, and return only those @@ -1028,13 +1025,13 @@ ssize_t bcm_vk_read(struct file *p_file, */ spin_lock(&chan->pendq_lock); for (q_num = 0; q_num < chan->q_nr; q_num++) { - list_for_each_entry(entry, &chan->pendq[q_num], node) { - if (entry->ctx->idx == ctx->idx) { + list_for_each_entry(iter, &chan->pendq[q_num], node) { + if (iter->ctx->idx == ctx->idx) { if (count >= - (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) { - list_del(&entry->node); + (iter->to_h_blks * VK_MSGQ_BLK_SIZE)) { + list_del(&iter->node); atomic_dec(&ctx->pend_cnt); - found = true; + entry = iter; } else { /* buffer not big enough */ rc = -EMSGSIZE; @@ -1046,7 +1043,7 @@ ssize_t bcm_vk_read(struct file *p_file, read_loop_exit: spin_unlock(&chan->pendq_lock); - if (found) { + if (entry) { /* retrieve the passed down msg_id */ set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id); rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE; diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c index 3f514d77a843..9080f9f150a2 100644 --- a/drivers/misc/cardreader/alcor_pci.c +++ b/drivers/misc/cardreader/alcor_pci.c @@ -317,12 +317,15 @@ static int alcor_pci_probe(struct pci_dev *pdev, ret = mfd_add_devices(&pdev->dev, priv->id, alcor_pci_cells, ARRAY_SIZE(alcor_pci_cells), NULL, 0, NULL); if (ret < 0) - goto error_release_regions; + goto error_clear_drvdata; alcor_pci_aspm_ctrl(priv, 0); return 0; +error_clear_drvdata: + pci_clear_master(pdev); + pci_set_drvdata(pdev, NULL); error_release_regions: pci_release_regions(pdev); error_free_ida: @@ -343,6 +346,7 @@ static void alcor_pci_remove(struct pci_dev *pdev) ida_free(&alcor_pci_idr, priv->id); pci_release_regions(pdev); + pci_clear_master(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index a77585ab0f30..749cc5a46d13 100644 --- 
a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -57,40 +57,6 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage) 0xFF, driving[drive_sel][2]); } -static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr) -{ - struct pci_dev *pdev = pcr->pci; - u32 reg; - - /* 0x814~0x817 */ - pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg); - pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); - - if (!rts5261_vendor_setting_valid(reg)) { - /* Not support MMC default */ - pcr->extra_caps |= EXTRA_CAPS_NO_MMC; - pcr_dbg(pcr, "skip fetch vendor setting\n"); - return; - } - - if (!rts5261_reg_check_mmc_support(reg)) - pcr->extra_caps |= EXTRA_CAPS_NO_MMC; - - /* TO do: need to add rtd3 function */ - pcr->rtd3_en = rts5261_reg_to_rtd3(reg); - - if (rts5261_reg_check_reverse_socket(reg)) - pcr->flags |= PCR_REVERSE_SOCKET; - - /* 0x724~0x727 */ - pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg); - pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); - - pcr->aspm_en = rts5261_reg_to_aspm(reg); - pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg); - pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg); -} - static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { /* Set relink_time to 0 */ @@ -391,11 +357,11 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr) } -static int rts5261_init_from_hw(struct rtsx_pcr *pcr) +static void rts5261_init_from_hw(struct rtsx_pcr *pcr) { struct pci_dev *pdev = pcr->pci; - int retval; - u32 lval, i; + u32 lval1, lval2, i; + u16 setting_reg1, setting_reg2; u8 valid, efuse_valid, tmp; rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, @@ -418,26 +384,70 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr) efuse_valid = ((tmp & 0x0C) >> 2); pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid); - if (efuse_valid == 0) { - retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); - if (retval != 0) - pcr_dbg(pcr, "read 0x814 DW fail\n"); - pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval); - /* 0x816 */ - valid = (u8)((lval >> 16) & 0x03); - pcr_dbg(pcr, "0x816: %d\n", valid); - } + pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2); + /* 0x816 */ + valid = (u8)((lval2 >> 16) & 0x03); + rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, REG_EFUSE_POR, 0); pcr_dbg(pcr, "Disable efuse por!\n"); - pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); - lval = lval & 0x00FFFFFF; - retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval); - if (retval != 0) - pcr_dbg(pcr, "write config fail\n"); + if (efuse_valid == 2 || efuse_valid == 3) { + if (valid == 3) { + /* Bypass efuse */ + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } else { + /* Use efuse data */ + setting_reg1 = PCR_SETTING_REG4; + setting_reg2 = PCR_SETTING_REG5; + } + } else if (efuse_valid == 0) { + // default + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } + + pci_read_config_dword(pdev, setting_reg2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2); + + if (!rts5261_vendor_setting_valid(lval2)) { + /* Not support MMC default */ + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + if (!rts5261_reg_check_mmc_support(lval2)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; - return retval; + pcr->rtd3_en = rts5261_reg_to_rtd3(lval2); + + if (rts5261_reg_check_reverse_socket(lval2)) + pcr->flags |= 
PCR_REVERSE_SOCKET; + + pci_read_config_dword(pdev, setting_reg1, &lval1); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1); + + pcr->aspm_en = rts5261_reg_to_aspm(lval1); + pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(lval1); + pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(lval1); + + if (setting_reg1 == PCR_SETTING_REG1) { + /* store setting */ + rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF)); + + pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1); + lval2 = lval2 & 0x00FFFFFF; + pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2); + } } static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) @@ -636,7 +646,6 @@ static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) } static const struct pcr_ops rts5261_pcr_ops = { - .fetch_vendor_settings = rtsx5261_fetch_vendor_settings, .turn_on_led = rts5261_turn_on_led, .turn_off_led = rts5261_turn_off_led, .extra_init_hw = rts5261_extra_init_hw, diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index 59eda55d92a3..1ef9b61077c4 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf, return 0; out_init_fail: + usb_set_intfdata(ucr->pusb_intf, NULL); usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf, ucr->iobuf_dma); return ret; diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index d80ada8cac09..93ebd174d848 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1606,17 +1606,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_req_munmap *req) { struct fastrpc_invoke_args args[1] = { [0] = { 0 } }; - struct fastrpc_buf *buf, *b; + struct fastrpc_buf *buf = NULL, *iter, *b; struct fastrpc_munmap_req_msg req_msg; struct device *dev = fl->sctx->dev; int err; u32 sc; spin_lock(&fl->lock); - list_for_each_entry_safe(buf, b, &fl->mmaps, node) { - if ((buf->raddr == req->vaddrout) && (buf->size == req->size)) + list_for_each_entry_safe(iter, b, &fl->mmaps, node) { + if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) { + buf = iter; break; - buf = NULL; + } } spin_unlock(&fl->lock); @@ -1747,17 +1748,18 @@ err_invoke: static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req) { struct fastrpc_invoke_args args[1] = { [0] = { 0 } }; - struct fastrpc_map *map = NULL, *m; + struct fastrpc_map *map = NULL, *iter, *m; struct fastrpc_mem_unmap_req_msg req_msg = { 0 }; int err = 0; u32 sc; struct device *dev = fl->sctx->dev; spin_lock(&fl->lock); - list_for_each_entry_safe(map, m, &fl->maps, node) { - if ((req->fd < 0 || map->fd == req->fd) && (map->raddr == req->vaddr)) + list_for_each_entry_safe(iter, m, &fl->maps, node) { + if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) { + map = iter; break; - map = NULL; + } } spin_unlock(&fl->lock); diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile index 6ebe3c7001ff..934a3a4aedc9 100644 --- 
a/drivers/misc/habanalabs/common/Makefile +++ b/drivers/misc/habanalabs/common/Makefile @@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \ common/command_buffer.o common/hw_queue.o common/irq.o \ common/sysfs.o common/hwmon.o common/memory.o \ common/command_submission.o common/firmware_if.o \ - common/state_dump.o + common/state_dump.o common/memory_mgr.o diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index a507110f6443..e13b2b39c058 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -160,24 +160,6 @@ static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) } } -static void cb_release(struct kref *ref) -{ - struct hl_device *hdev; - struct hl_cb *cb; - - cb = container_of(ref, struct hl_cb, refcount); - hdev = cb->hdev; - - hl_debugfs_remove_cb(cb); - - if (cb->is_mmu_mapped) - cb_unmap_mem(cb->ctx, cb); - - hl_ctx_put(cb->ctx); - - cb_do_release(hdev, cb); -} - static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, int ctx_id, bool internal_cb) { @@ -238,168 +220,175 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, return cb; } -int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, - struct hl_ctx *ctx, u32 cb_size, bool internal_cb, - bool map_cb, u64 *handle) +struct hl_cb_mmap_mem_alloc_args { + struct hl_device *hdev; + struct hl_ctx *ctx; + u32 cb_size; + bool internal_cb; + bool map_cb; +}; + +static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf) { - struct hl_cb *cb; - bool alloc_new_cb = true; - int rc, ctx_id = ctx->asid; + struct hl_cb *cb = buf->private; - /* - * Can't use generic function to check this because of special case - * where we create a CB as part of the reset process - */ - if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) { - dev_warn_ratelimited(hdev->dev, - "Device is disabled or in reset. 
Can't create new CBs\n"); - rc = -EBUSY; - goto out_err; - } + hl_debugfs_remove_cb(cb); - if (cb_size > SZ_2M) { - dev_err(hdev->dev, "CB size %d must be less than %d\n", - cb_size, SZ_2M); - rc = -EINVAL; - goto out_err; - } + if (cb->is_mmu_mapped) + cb_unmap_mem(cb->ctx, cb); + + hl_ctx_put(cb->ctx); - if (!internal_cb) { + cb_do_release(cb->hdev, cb); +} + +static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args) +{ + struct hl_cb_mmap_mem_alloc_args *cb_args = args; + struct hl_cb *cb; + int rc, ctx_id = cb_args->ctx->asid; + bool alloc_new_cb = true; + + if (!cb_args->internal_cb) { /* Minimum allocation must be PAGE SIZE */ - if (cb_size < PAGE_SIZE) - cb_size = PAGE_SIZE; + if (cb_args->cb_size < PAGE_SIZE) + cb_args->cb_size = PAGE_SIZE; if (ctx_id == HL_KERNEL_ASID_ID && - cb_size <= hdev->asic_prop.cb_pool_cb_size) { + cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) { - spin_lock(&hdev->cb_pool_lock); - if (!list_empty(&hdev->cb_pool)) { - cb = list_first_entry(&hdev->cb_pool, + spin_lock(&cb_args->hdev->cb_pool_lock); + if (!list_empty(&cb_args->hdev->cb_pool)) { + cb = list_first_entry(&cb_args->hdev->cb_pool, typeof(*cb), pool_list); list_del(&cb->pool_list); - spin_unlock(&hdev->cb_pool_lock); + spin_unlock(&cb_args->hdev->cb_pool_lock); alloc_new_cb = false; } else { - spin_unlock(&hdev->cb_pool_lock); - dev_dbg(hdev->dev, "CB pool is empty\n"); + spin_unlock(&cb_args->hdev->cb_pool_lock); + dev_dbg(cb_args->hdev->dev, "CB pool is empty\n"); } } } if (alloc_new_cb) { - cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb); - if (!cb) { - rc = -ENOMEM; - goto out_err; - } + cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb); + if (!cb) + return -ENOMEM; } - cb->hdev = hdev; - cb->ctx = ctx; - hl_ctx_get(hdev, cb->ctx); + cb->hdev = cb_args->hdev; + cb->ctx = cb_args->ctx; + cb->buf = buf; + cb->buf->mappable_size = cb->size; + cb->buf->private = cb; + + hl_ctx_get(cb->ctx); - if (map_cb) { + if (cb_args->map_cb) { if (ctx_id == HL_KERNEL_ASID_ID) { - dev_err(hdev->dev, + dev_err(cb_args->hdev->dev, "CB mapping is not supported for kernel context\n"); rc = -EINVAL; goto release_cb; } - rc = cb_map_mem(ctx, cb); + rc = cb_map_mem(cb_args->ctx, cb); if (rc) goto release_cb; } - spin_lock(&mgr->cb_lock); - rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC); - spin_unlock(&mgr->cb_lock); - - if (rc < 0) { - dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n"); - goto unmap_mem; - } - - cb->id = (u64) rc; - - kref_init(&cb->refcount); - spin_lock_init(&cb->lock); - - /* - * idr is 32-bit so we can safely OR it with a mask that is above - * 32 bit - */ - *handle = cb->id | HL_MMAP_TYPE_CB; - *handle <<= PAGE_SHIFT; - hl_debugfs_add_cb(cb); return 0; -unmap_mem: - if (cb->is_mmu_mapped) - cb_unmap_mem(cb->ctx, cb); release_cb: hl_ctx_put(cb->ctx); - cb_do_release(hdev, cb); -out_err: - *handle = 0; + cb_do_release(cb_args->hdev, cb); return rc; } -int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) +static int hl_cb_mmap(struct hl_mmap_mem_buf *buf, + struct vm_area_struct *vma, void *args) { - struct hl_cb *cb; - u32 handle; - int rc = 0; + struct hl_cb *cb = buf->private; - /* - * handle was given to user to do mmap, I need to shift it back to - * how the idr module gave it to me - */ - cb_handle >>= PAGE_SHIFT; - handle = (u32) cb_handle; + return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address, + cb->bus_address, cb->size); +} - spin_lock(&mgr->cb_lock); 
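/*
 * [Editor's sketch] The hunks below replace the per-CB IDR bookkeeping with
 * the series' unified memory manager: a CB becomes the ->private payload of
 * an hl_mmap_mem_buf and is looked up by an opaque 64-bit handle, with no
 * PAGE_SHIFT encoding. A minimal lookup/release sketch, assuming the
 * hl_cb_get()/hl_cb_put() signatures introduced by this patch (the
 * example_* wrapper is hypothetical):
 */
static int example_cb_use(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_cb *cb;

	cb = hl_cb_get(mmg, handle);	/* takes a reference on cb->buf */
	if (!cb)
		return -EINVAL;

	/* ... access cb->kernel_address / cb->bus_address here ... */

	hl_cb_put(cb);			/* drops the hl_mmap_mem_buf reference */
	return 0;
}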
+static struct hl_mmap_mem_buf_behavior cb_behavior = { + .topic = "CB", + .mem_id = HL_MMAP_TYPE_CB, + .alloc = hl_cb_mmap_mem_alloc, + .release = hl_cb_mmap_mem_release, + .mmap = hl_cb_mmap, +}; - cb = idr_find(&mgr->cb_handles, handle); - if (cb) { - idr_remove(&mgr->cb_handles, handle); - spin_unlock(&mgr->cb_lock); - kref_put(&cb->refcount, cb_release); - } else { - spin_unlock(&mgr->cb_lock); - dev_err(hdev->dev, - "CB destroy failed, no match to handle 0x%x\n", handle); - rc = -EINVAL; +int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg, + struct hl_ctx *ctx, u32 cb_size, bool internal_cb, + bool map_cb, u64 *handle) +{ + struct hl_cb_mmap_mem_alloc_args args = { + .hdev = hdev, + .ctx = ctx, + .cb_size = cb_size, + .internal_cb = internal_cb, + .map_cb = map_cb, + }; + struct hl_mmap_mem_buf *buf; + int ctx_id = ctx->asid; + + if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) { + dev_warn_ratelimited(hdev->dev, + "Device is disabled or in reset. Can't create new CBs\n"); + return -EBUSY; } - return rc; + if (cb_size > SZ_2M) { + dev_err(hdev->dev, "CB size %d must be less than %d\n", + cb_size, SZ_2M); + return -EINVAL; + } + + buf = hl_mmap_mem_buf_alloc( + mmg, &cb_behavior, + ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args); + if (!buf) + return -ENOMEM; + + *handle = buf->handle; + + return 0; +} + +int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle) +{ + int rc; + + rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle); + if (rc < 0) + return rc; /* Invalid handle */ + + if (rc == 0) + dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle); + + return 0; } -static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, - u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va) +static int hl_cb_info(struct hl_mem_mgr *mmg, + u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va) { struct hl_vm_va_block *va_block; struct hl_cb *cb; - u32 handle; int rc = 0; - /* The CB handle was given to user to do mmap, so need to shift it back - * to the value which was allocated by the IDR module. 
- */ - cb_handle >>= PAGE_SHIFT; - handle = (u32) cb_handle; - - spin_lock(&mgr->cb_lock); - - cb = idr_find(&mgr->cb_handles, handle); + cb = hl_cb_get(mmg, handle); if (!cb) { - dev_err(hdev->dev, - "CB info failed, no match to handle 0x%x\n", handle); - rc = -EINVAL; - goto out; + dev_err(mmg->dev, + "CB info failed, no match to handle 0x%llx\n", handle); + return -EINVAL; } if (flags & HL_CB_FLAGS_GET_DEVICE_VA) { @@ -407,7 +396,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, if (va_block) { *device_va = va_block->start; } else { - dev_err(hdev->dev, "CB is not mapped to the device's MMU\n"); + dev_err(mmg->dev, "CB is not mapped to the device's MMU\n"); rc = -EINVAL; goto out; } @@ -416,7 +405,7 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, } out: - spin_unlock(&mgr->cb_lock); + hl_cb_put(cb); return rc; } @@ -444,7 +433,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) args->in.cb_size, HL_MAX_CB_SIZE); rc = -EINVAL; } else { - rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx, + rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx, args->in.cb_size, false, !!(args->in.flags & HL_CB_FLAGS_MAP), &handle); @@ -455,12 +444,12 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) break; case HL_CB_OP_DESTROY: - rc = hl_cb_destroy(hdev, &hpriv->cb_mgr, + rc = hl_cb_destroy(&hpriv->mem_mgr, args->in.cb_handle); break; case HL_CB_OP_INFO: - rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle, + rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle, args->in.flags, &usage_cnt, &device_va); @@ -483,163 +472,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) return rc; } -static void cb_vm_close(struct vm_area_struct *vma) +struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle) { - struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data; - long new_mmap_size; - - new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start); - - if (new_mmap_size > 0) { - cb->mmap_size = new_mmap_size; - return; - } - - spin_lock(&cb->lock); - cb->mmap = false; - spin_unlock(&cb->lock); - - hl_cb_put(cb); - vma->vm_private_data = NULL; -} - -static const struct vm_operations_struct cb_vm_ops = { - .close = cb_vm_close -}; - -int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) -{ - struct hl_device *hdev = hpriv->hdev; - struct hl_cb *cb; - u32 handle, user_cb_size; - int rc; - - /* We use the page offset to hold the idr and thus we need to clear - * it before doing the mmap itself - */ - handle = vma->vm_pgoff; - vma->vm_pgoff = 0; - - /* reference was taken here */ - cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); - if (!cb) { - dev_err(hdev->dev, - "CB mmap failed, no match to handle 0x%x\n", handle); - return -EINVAL; - } - - /* Validation check */ - user_cb_size = vma->vm_end - vma->vm_start; - if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) { - dev_err(hdev->dev, - "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n", - vma->vm_end - vma->vm_start, cb->size); - rc = -EINVAL; - goto put_cb; - } - - if (!access_ok((void __user *) (uintptr_t) vma->vm_start, - user_cb_size)) { - dev_err(hdev->dev, - "user pointer is invalid - 0x%lx\n", - vma->vm_start); - - rc = -EINVAL; - goto put_cb; - } - - spin_lock(&cb->lock); + struct hl_mmap_mem_buf *buf; - if (cb->mmap) { - dev_err(hdev->dev, - "CB mmap failed, CB already mmaped to user\n"); - rc = -EINVAL; - goto release_lock; - } - - cb->mmap = true; - - spin_unlock(&cb->lock); - - vma->vm_ops = &cb_vm_ops; - - /* - * Note: We're transferring the cb reference to - * 
vma->vm_private_data here. - */ - - vma->vm_private_data = cb; - - rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address, - cb->bus_address, cb->size); - if (rc) { - spin_lock(&cb->lock); - cb->mmap = false; - goto release_lock; - } - - cb->mmap_size = cb->size; - vma->vm_pgoff = handle; - - return 0; - -release_lock: - spin_unlock(&cb->lock); -put_cb: - hl_cb_put(cb); - return rc; -} - -struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, - u32 handle) -{ - struct hl_cb *cb; - - spin_lock(&mgr->cb_lock); - cb = idr_find(&mgr->cb_handles, handle); - - if (!cb) { - spin_unlock(&mgr->cb_lock); - dev_warn(hdev->dev, - "CB get failed, no match to handle 0x%x\n", handle); + buf = hl_mmap_mem_buf_get(mmg, handle); + if (!buf) return NULL; - } - - kref_get(&cb->refcount); - - spin_unlock(&mgr->cb_lock); - - return cb; + return buf->private; } void hl_cb_put(struct hl_cb *cb) { - kref_put(&cb->refcount, cb_release); -} - -void hl_cb_mgr_init(struct hl_cb_mgr *mgr) -{ - spin_lock_init(&mgr->cb_lock); - idr_init(&mgr->cb_handles); -} - -void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr) -{ - struct hl_cb *cb; - struct idr *idp; - u32 id; - - idp = &mgr->cb_handles; - - idr_for_each_entry(idp, cb, id) { - if (kref_put(&cb->refcount, cb_release) != 1) - dev_err(hdev->dev, - "CB %d for CTX ID %d is still alive\n", - id, cb->ctx->asid); - } - - idr_destroy(&mgr->cb_handles); + hl_mmap_mem_buf_put(cb->buf); } struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, @@ -649,7 +495,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, struct hl_cb *cb; int rc; - rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size, + rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size, internal_cb, false, &cb_handle); if (rc) { dev_err(hdev->dev, @@ -657,8 +503,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, return NULL; } - cb_handle >>= PAGE_SHIFT; - cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); + cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle); /* hl_cb_get should never fail here */ if (!cb) { dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n", @@ -669,7 +514,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, return cb; destroy_cb: - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle); return NULL; } diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index d93ef9f1c45c..fb30b7de4aab 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -407,8 +407,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) { - bool next_entry_found = false; - struct hl_cs *next, *first_cs; + struct hl_cs *next = NULL, *iter, *first_cs; if (!cs_needs_timeout(cs)) return; @@ -443,13 +442,13 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) spin_lock(&hdev->cs_mirror_lock); /* queue TDR for next CS */ - list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node) - if (cs_needs_timeout(next)) { - next_entry_found = true; + list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node) + if (cs_needs_timeout(iter)) { + next = iter; break; } - if (next_entry_found && !next->tdr_active) { + if (next && !next->tdr_active) { next->tdr_active = true; 
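/*
 * [Editor's note] The same list-iterator hardening recurs across this
 * series (bcm_vk_dequeue_pending, bcm_vk_read, fastrpc, and cs_handle_tdr
 * here): the cursor of list_for_each_entry() must not be used after the
 * loop, because on no-match it points at the list head rather than a valid
 * entry. The generic shape, sketched with hypothetical names:
 *
 *	struct foo *found = NULL, *iter;
 *
 *	list_for_each_entry(iter, &head, node)
 *		if (match(iter)) {
 *			found = iter;
 *			break;
 *		}
 *	if (found)
 *		use(found);
 */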
schedule_delayed_work(&next->work_tdr, next->timeout_jiffies); } @@ -736,11 +735,10 @@ static void cs_timedout(struct work_struct *work) hdev = cs->ctx->hdev; /* Save only the first CS timeout parameters */ - rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1); + rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1); if (!rc) { - hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime; - hdev->last_error.cs_timeout_timestamp = ktime_get(); - hdev->last_error.cs_timeout_seq = cs->sequence; + hdev->last_error.cs_timeout.timestamp = ktime_get(); + hdev->last_error.cs_timeout.seq = cs->sequence; } switch (cs->type) { @@ -806,7 +804,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, } /* increment refcnt for context */ - hl_ctx_get(hdev, ctx); + hl_ctx_get(ctx); cs->ctx = ctx; cs->submitted = false; @@ -958,9 +956,9 @@ wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt) spin_lock_irqsave(&interrupt->wait_list_lock, flags); list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) { - if (pend->ts_reg_info.ts_buff) { + if (pend->ts_reg_info.buf) { list_del(&pend->wait_list_node); - hl_ts_put(pend->ts_reg_info.ts_buff); + hl_mmap_mem_buf_put(pend->ts_reg_info.buf); hl_cb_put(pend->ts_reg_info.cq_cb); } else { pend->fence.error = -EIO; @@ -1072,17 +1070,14 @@ static int validate_queue_index(struct hl_device *hdev, } static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev, - struct hl_cb_mgr *cb_mgr, + struct hl_mem_mgr *mmg, struct hl_cs_chunk *chunk) { struct hl_cb *cb; - u32 cb_handle; - cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT); - - cb = hl_cb_get(hdev, cb_mgr, cb_handle); + cb = hl_cb_get(mmg, chunk->cb_handle); if (!cb) { - dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle); + dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle); return NULL; } @@ -1344,7 +1339,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, } if (is_kernel_allocated_cb) { - cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk); + cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk); if (!cb) { atomic64_inc( &ctx->cs_counters.validation_drop_cnt); @@ -1772,7 +1767,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, */ job->patched_cb = job->user_cb; job->job_cb_size = job->user_cb_size; - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); /* increment refcount as for external queues we get completion */ cs_get(cs); @@ -1834,7 +1829,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, handle->count = count; - hl_ctx_get(hdev, hpriv->ctx); + hl_ctx_get(hpriv->ctx); handle->ctx = hpriv->ctx; mgr = &hpriv->ctx->sig_mgr; @@ -2528,7 +2523,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, if (timestamp) *timestamp = 0; - hl_ctx_get(hdev, ctx); + hl_ctx_get(ctx); fence = hl_ctx_get_fence(ctx, seq); @@ -2668,7 +2663,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) { struct multi_cs_completion *mcs_compl; struct hl_device *hdev = hpriv->hdev; - struct multi_cs_data mcs_data = {0}; + struct multi_cs_data mcs_data = {}; union hl_wait_cs_args *args = data; struct hl_ctx *ctx = hpriv->ctx; struct hl_fence **fence_arr; @@ -2719,7 +2714,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) mcs_data.fence_arr = fence_arr; mcs_data.arr_len = seq_arr_len; - hl_ctx_get(hdev, ctx); + 
hl_ctx_get(ctx); /* wait (with timeout) for the first CS to be completed */ mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us); @@ -2868,12 +2863,13 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) return 0; } -static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff, +static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf, struct hl_cb *cq_cb, u64 ts_offset, u64 cq_offset, u64 target_value, spinlock_t *wait_list_lock, struct hl_user_pending_interrupt **pend) { + struct hl_ts_buff *ts_buff = buf->private; struct hl_user_pending_interrupt *requested_offset_record = (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address + ts_offset; @@ -2885,7 +2881,7 @@ static int ts_buff_get_kernel_ts_record(struct hl_ts_buff *ts_buff, /* Validate ts_offset not exceeding last max */ if (requested_offset_record > cb_last) { - dev_err(ts_buff->hdev->dev, "Ts offset exceeds max CB offset(0x%llx)\n", + dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n", (u64)(uintptr_t)cb_last); return -EINVAL; } @@ -2904,18 +2900,21 @@ start_over: list_del(&requested_offset_record->wait_list_node); spin_unlock_irqrestore(wait_list_lock, flags); - hl_ts_put(requested_offset_record->ts_reg_info.ts_buff); + hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf); hl_cb_put(requested_offset_record->ts_reg_info.cq_cb); - dev_dbg(ts_buff->hdev->dev, "ts node removed from interrupt list now can re-use\n"); + dev_dbg(buf->mmg->dev, + "ts node removed from interrupt list now can re-use\n"); } else { - dev_dbg(ts_buff->hdev->dev, "ts node in middle of irq handling\n"); + dev_dbg(buf->mmg->dev, + "ts node in middle of irq handling\n"); /* irq handling in the middle give it time to finish */ spin_unlock_irqrestore(wait_list_lock, flags); usleep_range(1, 10); if (++iter_counter == MAX_TS_ITER_NUM) { - dev_err(ts_buff->hdev->dev, "handling registration interrupt took too long!!\n"); + dev_err(buf->mmg->dev, + "handling registration interrupt took too long!!\n"); return -EINVAL; } @@ -2927,7 +2926,7 @@ start_over: /* Fill up the new registration node info */ requested_offset_record->ts_reg_info.in_use = 1; - requested_offset_record->ts_reg_info.ts_buff = ts_buff; + requested_offset_record->ts_reg_info.buf = buf; requested_offset_record->ts_reg_info.cq_cb = cq_cb; requested_offset_record->ts_reg_info.timestamp_kernel_addr = (u64 *) ts_buff->user_buff_address + ts_offset; @@ -2937,21 +2936,20 @@ start_over: *pend = requested_offset_record; - dev_dbg(ts_buff->hdev->dev, "Found available node in TS kernel CB(0x%llx)\n", + dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB(0x%llx)\n", (u64)(uintptr_t)requested_offset_record); return 0; } static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, - struct hl_cb_mgr *cb_mgr, struct hl_ts_mgr *ts_mgr, + struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg, u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset, u64 target_value, struct hl_user_interrupt *interrupt, bool register_ts_record, u64 ts_handle, u64 ts_offset, u32 *status, u64 *timestamp) { - u32 cq_patched_handle, ts_patched_handle; struct hl_user_pending_interrupt *pend; - struct hl_ts_buff *ts_buff; + struct hl_mmap_mem_buf *buf; struct hl_cb *cq_cb; unsigned long timeout, flags; long completion_rc; @@ -2959,10 +2957,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, timeout = hl_usecs64_to_jiffies(timeout_us); - hl_ctx_get(hdev, ctx); + hl_ctx_get(ctx); - cq_patched_handle = 
lower_32_bits(cq_counters_handle >> PAGE_SHIFT); - cq_cb = hl_cb_get(hdev, cb_mgr, cq_patched_handle); + cq_cb = hl_cb_get(cb_mmg, cq_counters_handle); if (!cq_cb) { rc = -EINVAL; goto put_ctx; @@ -2971,16 +2968,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, if (register_ts_record) { dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n", interrupt->interrupt_id, ts_offset, cq_counters_offset); - - ts_patched_handle = lower_32_bits(ts_handle >> PAGE_SHIFT); - ts_buff = hl_ts_get(hdev, ts_mgr, ts_patched_handle); - if (!ts_buff) { + buf = hl_mmap_mem_buf_get(mmg, ts_handle); + if (!buf) { rc = -EINVAL; goto put_cq_cb; } /* Find first available record */ - rc = ts_buff_get_kernel_ts_record(ts_buff, cq_cb, ts_offset, + rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset, cq_counters_offset, target_value, &interrupt->wait_list_lock, &pend); if (rc) @@ -3087,7 +3082,7 @@ ts_registration_exit: return rc; put_ts_buff: - hl_ts_put(ts_buff); + hl_mmap_mem_buf_put(buf); put_cq_cb: hl_cb_put(cq_cb); put_ctx: @@ -3111,7 +3106,7 @@ static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ timeout = hl_usecs64_to_jiffies(timeout_us); - hl_ctx_get(hdev, ctx); + hl_ctx_get(ctx); pend = kzalloc(sizeof(*pend), GFP_KERNEL); if (!pend) { @@ -3249,7 +3244,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data) interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt]; if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ) - rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr, &hpriv->ts_mem_mgr, + rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr, args->in.interrupt_timeout_us, args->in.cq_counters_handle, args->in.cq_counters_offset, args->in.target, interrupt, diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index c6360e33bce8..ed2cfd0c6e99 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -262,7 +262,7 @@ err_hw_block_mem_fini: return rc; } -void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx) +void hl_ctx_get(struct hl_ctx *ctx) { kref_get(&ctx->refcount); } @@ -284,7 +284,7 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev) * immediately once we find him */ ctx = hpriv->ctx; - hl_ctx_get(hdev, ctx); + hl_ctx_get(ctx); break; } diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index f18495545854..c6744bfc6da4 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> +#include <linux/iommu.h> #define MMU_ADDR_BUF_SIZE 40 #define MMU_ASID_BUF_SIZE 10 @@ -125,9 +126,9 @@ static int command_buffers_show(struct seq_file *s, void *data) } seq_printf(s, " %03llu %d 0x%08x %d %d %d\n", - cb->id, cb->ctx->asid, cb->size, - kref_read(&cb->refcount), - cb->mmap, atomic_read(&cb->cs_cnt)); + cb->buf->handle, cb->ctx->asid, cb->size, + kref_read(&cb->buf->refcount), + atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt)); } spin_unlock(&dev_entry->cb_spinlock); @@ -369,8 +370,7 @@ static int userptr_lookup_show(struct seq_file *s, void *data) if (dev_entry->userptr_lookup >= userptr->addr && dev_entry->userptr_lookup < userptr->addr + userptr->size) { total_npages = 0; - for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, - i) { + 
for_each_sgtable_dma_sg(userptr->sgt, sg, i) { npages = hl_get_sg_info(sg, &dma_addr); sg_start = userptr->addr + total_npages * PAGE_SIZE; @@ -538,6 +538,39 @@ static int engines_show(struct seq_file *s, void *data) return 0; } +static ssize_t hl_memory_scrub(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u64 val = entry->memory_scrub_val; + int rc; + + if (!hl_device_operational(hdev, NULL)) { + dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n"); + return -EIO; + } + + mutex_lock(&hdev->fpriv_list_lock); + if (hdev->is_compute_ctx_active) { + mutex_unlock(&hdev->fpriv_list_lock); + dev_err(hdev->dev, "can't scrub dram, context exist\n"); + return -EBUSY; + } + hdev->is_in_dram_scrub = true; + mutex_unlock(&hdev->fpriv_list_lock); + + rc = hdev->asic_funcs->scrub_device_dram(hdev, val); + + mutex_lock(&hdev->fpriv_list_lock); + hdev->is_in_dram_scrub = false; + mutex_unlock(&hdev->fpriv_list_lock); + + if (rc) + return rc; + return count; +} + static bool hl_is_device_va(struct hl_device *hdev, u64 addr) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -647,13 +680,105 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size, return rc; } +static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr, + u64 *val, enum debugfs_access_type acc_type, bool *found) +{ + size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ? + sizeof(u64) : sizeof(u32); + struct pci_mem_region *mem_reg; + int i; + + for (i = 0; i < PCI_REGION_NUMBER; i++) { + mem_reg = &hdev->pci_mem_region[i]; + if (!mem_reg->used) + continue; + if (addr >= mem_reg->region_base && + addr <= mem_reg->region_base + mem_reg->region_size - acc_size) { + *found = true; + return hdev->asic_funcs->access_dev_mem(hdev, mem_reg, i, + addr, val, acc_type); + } + } + return 0; +} + +static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 offset = prop->device_dma_offset_for_host_access; + + switch (acc_type) { + case DEBUGFS_READ32: + *val = *(u32 *) phys_to_virt(addr - offset); + break; + case DEBUGFS_WRITE32: + *(u32 *) phys_to_virt(addr - offset) = *val; + break; + case DEBUGFS_READ64: + *val = *(u64 *) phys_to_virt(addr - offset); + break; + case DEBUGFS_WRITE64: + *(u64 *) phys_to_virt(addr - offset) = *val; + break; + default: + dev_err(hdev->dev, "hostmem access-type %d id not supported\n", acc_type); + break; + } +} + +static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type) +{ + size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ? 
+ sizeof(u64) : sizeof(u32); + u64 host_start = hdev->asic_prop.host_base_address; + u64 host_end = hdev->asic_prop.host_end_address; + bool user_address, found = false; + int rc; + + user_address = hl_is_device_va(hdev, addr); + if (user_address) { + rc = device_va_to_pa(hdev, addr, acc_size, &addr); + if (rc) + return rc; + } + + rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found); + if (rc) { + dev_err(hdev->dev, + "Failed reading addr %#llx from dev mem (%d)\n", + addr, rc); + return rc; + } + + if (found) + return 0; + + if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) { + rc = -EINVAL; + goto err; + } + + if (addr >= host_start && addr <= host_end - acc_size) { + hl_access_host_mem(hdev, addr, val, acc_type); + } else { + rc = -EINVAL; + goto err; + } + + return 0; +err: + dev_err(hdev->dev, "invalid addr %#llx\n", addr); + return rc; +} + static ssize_t hl_data_read32(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; - u64 addr = entry->addr; - bool user_address; + u64 value64, addr = entry->addr; char tmp_buf[32]; ssize_t rc; u32 val; @@ -666,18 +791,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf, if (*ppos) return 0; - user_address = hl_is_device_va(hdev, addr); - if (user_address) { - rc = device_va_to_pa(hdev, addr, sizeof(val), &addr); - if (rc) - return rc; - } - - rc = hdev->asic_funcs->debugfs_read32(hdev, addr, user_address, &val); - if (rc) { - dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr); + rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32); + if (rc) return rc; - } + + val = value64; /* downcast back to 32 */ sprintf(tmp_buf, "0x%08x\n", val); return simple_read_from_buffer(buf, count, ppos, tmp_buf, @@ -689,8 +807,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf, { struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; - u64 addr = entry->addr; - bool user_address; + u64 value64, addr = entry->addr; u32 value; ssize_t rc; @@ -703,19 +820,10 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf, if (rc) return rc; - user_address = hl_is_device_va(hdev, addr); - if (user_address) { - rc = device_va_to_pa(hdev, addr, sizeof(value), &addr); - if (rc) - return rc; - } - - rc = hdev->asic_funcs->debugfs_write32(hdev, addr, user_address, value); - if (rc) { - dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n", - value, addr); + value64 = value; + rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32); + if (rc) return rc; - } return count; } @@ -726,7 +834,6 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf, struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; u64 addr = entry->addr; - bool user_address; char tmp_buf[32]; ssize_t rc; u64 val; @@ -739,18 +846,9 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf, if (*ppos) return 0; - user_address = hl_is_device_va(hdev, addr); - if (user_address) { - rc = device_va_to_pa(hdev, addr, sizeof(val), &addr); - if (rc) - return rc; - } - - rc = hdev->asic_funcs->debugfs_read64(hdev, addr, user_address, &val); - if (rc) { - dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr); + rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64); + if (rc) return rc; - } sprintf(tmp_buf, "0x%016llx\n", val); return simple_read_from_buffer(buf, count, ppos, tmp_buf, @@ -763,7 +861,6 @@ 
static ssize_t hl_data_write64(struct file *f, const char __user *buf, struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; u64 addr = entry->addr; - bool user_address; u64 value; ssize_t rc; @@ -776,19 +873,9 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf, if (rc) return rc; - user_address = hl_is_device_va(hdev, addr); - if (user_address) { - rc = device_va_to_pa(hdev, addr, sizeof(value), &addr); - if (rc) - return rc; - } - - rc = hdev->asic_funcs->debugfs_write64(hdev, addr, user_address, value); - if (rc) { - dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n", - value, addr); + rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64); + if (rc) return rc; - } return count; } @@ -829,23 +916,67 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf, } /* Free the previous allocation, if there was any */ - entry->blob_desc.size = 0; - vfree(entry->blob_desc.data); + entry->data_dma_blob_desc.size = 0; + vfree(entry->data_dma_blob_desc.data); - entry->blob_desc.data = vmalloc(size); - if (!entry->blob_desc.data) + entry->data_dma_blob_desc.data = vmalloc(size); + if (!entry->data_dma_blob_desc.data) return -ENOMEM; rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size, - entry->blob_desc.data); + entry->data_dma_blob_desc.data); if (rc) { dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr); - vfree(entry->blob_desc.data); - entry->blob_desc.data = NULL; + vfree(entry->data_dma_blob_desc.data); + entry->data_dma_blob_desc.data = NULL; + return -EIO; + } + + entry->data_dma_blob_desc.size = size; + + return count; +} + +static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 size, trig; + ssize_t rc; + + if (hdev->reset_info.in_reset) { + dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n"); + return 0; + } + rc = kstrtouint_from_user(buf, count, 10, &trig); + if (rc) + return rc; + + if (trig != 1) { + dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n"); + return -EINVAL; + } + + size = sizeof(struct cpucp_monitor_dump); + + /* Free the previous allocation, if there was any */ + entry->mon_dump_blob_desc.size = 0; + vfree(entry->mon_dump_blob_desc.data); + + entry->mon_dump_blob_desc.data = vmalloc(size); + if (!entry->mon_dump_blob_desc.data) + return -ENOMEM; + + rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data); + if (rc) { + dev_err(hdev->dev, "Failed to dump monitors\n"); + vfree(entry->mon_dump_blob_desc.data); + entry->mon_dump_blob_desc.data = NULL; return -EIO; } - entry->blob_desc.size = size; + entry->mon_dump_blob_desc.size = size; return count; } @@ -1218,6 +1349,11 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf, return count; } +static const struct file_operations hl_mem_scrub_fops = { + .owner = THIS_MODULE, + .write = hl_memory_scrub, +}; + static const struct file_operations hl_data32b_fops = { .owner = THIS_MODULE, .read = hl_data_read32, @@ -1235,6 +1371,11 @@ static const struct file_operations hl_dma_size_fops = { .write = hl_dma_size_write }; +static const struct file_operations hl_monitor_dump_fops = { + .owner = THIS_MODULE, + .write = hl_monitor_dump_trigger +}; + static const struct file_operations hl_i2c_data_fops = { .owner = THIS_MODULE, .read = hl_i2c_data_read, @@ -1350,8 +1491,10 @@ void 
hl_debugfs_add_device(struct hl_device *hdev) if (!dev_entry->entry_arr) return; - dev_entry->blob_desc.size = 0; - dev_entry->blob_desc.data = NULL; + dev_entry->data_dma_blob_desc.size = 0; + dev_entry->data_dma_blob_desc.data = NULL; + dev_entry->mon_dump_blob_desc.size = 0; + dev_entry->mon_dump_blob_desc.data = NULL; INIT_LIST_HEAD(&dev_entry->file_list); INIT_LIST_HEAD(&dev_entry->cb_list); @@ -1370,6 +1513,17 @@ void hl_debugfs_add_device(struct hl_device *hdev) dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), hl_debug_root); + debugfs_create_x64("memory_scrub_val", + 0644, + dev_entry->root, + &dev_entry->memory_scrub_val); + + debugfs_create_file("memory_scrub", + 0200, + dev_entry->root, + dev_entry, + &hl_mem_scrub_fops); + debugfs_create_x64("addr", 0644, dev_entry->root, @@ -1470,7 +1624,18 @@ void hl_debugfs_add_device(struct hl_device *hdev) debugfs_create_blob("data_dma", 0400, dev_entry->root, - &dev_entry->blob_desc); + &dev_entry->data_dma_blob_desc); + + debugfs_create_file("monitor_dump_trig", + 0200, + dev_entry->root, + dev_entry, + &hl_monitor_dump_fops); + + debugfs_create_blob("monitor_dump", + 0400, + dev_entry->root, + &dev_entry->mon_dump_blob_desc); debugfs_create_x8("skip_reset_on_timeout", 0644, @@ -1509,7 +1674,8 @@ void hl_debugfs_remove_device(struct hl_device *hdev) mutex_destroy(&entry->file_mutex); - vfree(entry->blob_desc.data); + vfree(entry->data_dma_blob_desc.data); + vfree(entry->mon_dump_blob_desc.data); for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i) vfree(entry->state_dump[i]); diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index dc9341a64541..b4f14c6d3970 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -15,6 +15,182 @@ #define HL_RESET_DELAY_USEC 10000 /* 10ms */ +/* + * hl_set_dram_bar- sets the bar to allow later access to address + * + * @hdev: pointer to habanalabs device structure + * @addr: the address the caller wants to access. + * + * @return: the old BAR base address on success, U64_MAX for failure. + * The caller should set it back to the old address after use. + * + * In case the bar space does not cover the whole address space, + * the bar base address should be set to allow access to a given address. + * This function can be called also if the bar doesn't need to be set, + * in that case it just won't change the base. 
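 * [Editor's sketch] Typical use, mirroring hl_access_sram_dram_region()
 * below; bar_ptr stands in for the computed BAR pointer and error handling
 * is elided:
 *
 *	old_base = hl_set_dram_bar(hdev, addr);
 *	if (old_base == U64_MAX)
 *		return -EIO;
 *	val = readl(bar_ptr);
 *	hl_set_dram_bar(hdev, old_base);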
+ */ +static uint64_t hl_set_dram_bar(struct hl_device *hdev, u64 addr) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 bar_base_addr; + + bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull); + + return hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr); +} + + +static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type, enum pci_region region_type) +{ + struct pci_mem_region *region = &hdev->pci_mem_region[region_type]; + u64 old_base, rc; + + if (region_type == PCI_REGION_DRAM) { + old_base = hl_set_dram_bar(hdev, addr); + if (old_base == U64_MAX) + return -EIO; + } + + switch (acc_type) { + case DEBUGFS_READ8: + *val = readb(hdev->pcie_bar[region->bar_id] + + addr - region->region_base + region->offset_in_bar); + break; + case DEBUGFS_WRITE8: + writeb(*val, hdev->pcie_bar[region->bar_id] + + addr - region->region_base + region->offset_in_bar); + break; + case DEBUGFS_READ32: + *val = readl(hdev->pcie_bar[region->bar_id] + + addr - region->region_base + region->offset_in_bar); + break; + case DEBUGFS_WRITE32: + writel(*val, hdev->pcie_bar[region->bar_id] + + addr - region->region_base + region->offset_in_bar); + break; + case DEBUGFS_READ64: + *val = readq(hdev->pcie_bar[region->bar_id] + + addr - region->region_base + region->offset_in_bar); + break; + case DEBUGFS_WRITE64: + writeq(*val, hdev->pcie_bar[region->bar_id] + + addr - region->region_base + region->offset_in_bar); + break; + } + + if (region_type == PCI_REGION_DRAM) { + rc = hl_set_dram_bar(hdev, old_base); + if (rc == U64_MAX) + return -EIO; + } + + return 0; +} + +int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct scatterlist *sg; + int rc, i; + + rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0); + if (rc) + return rc; + + /* Shift to the device's base physical address of host memory if necessary */ + if (prop->device_dma_offset_for_host_access) + for_each_sgtable_dma_sg(sgt, sg, i) + sg->dma_address += prop->device_dma_offset_for_host_access; + + return 0; +} + +void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct scatterlist *sg; + int i; + + /* Cancel the device's base physical address of host memory if necessary */ + if (prop->device_dma_offset_for_host_access) + for_each_sgtable_dma_sg(sgt, sg, i) + sg->dma_address -= prop->device_dma_offset_for_host_access; + + dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0); +} + +/* + * hl_access_cfg_region - access the config region + * + * @hdev: pointer to habanalabs device structure + * @addr: the address to access + * @val: the value to write from or read to + * @acc_type: the type of access (read/write 64/32) + */ +int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type) +{ + struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG]; + u32 val_h, val_l; + + if (!IS_ALIGNED(addr, sizeof(u32))) { + dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32)); + return -EINVAL; + } + + switch (acc_type) { + case DEBUGFS_READ32: + *val = RREG32(addr - cfg_region->region_base); + break; + case DEBUGFS_WRITE32: + WREG32(addr - cfg_region->region_base, *val); + break; + case DEBUGFS_READ64: + val_l = RREG32(addr - cfg_region->region_base); + val_h = RREG32(addr + sizeof(u32) - 
cfg_region->region_base); + + *val = (((u64) val_h) << 32) | val_l; + break; + case DEBUGFS_WRITE64: + WREG32(addr - cfg_region->region_base, lower_32_bits(*val)); + WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val)); + break; + default: + dev_err(hdev->dev, "access type %d is not supported\n", acc_type); + return -EOPNOTSUPP; + } + + return 0; +} + +/* + * hl_access_dev_mem - access device memory + * + * @hdev: pointer to habanalabs device structure + * @region: the memory region the address belongs to + * @region_type: the type of the region the address belongs to + * @addr: the address to access + * @val: the value to write from or read to + * @acc_type: the type of access (r/w, 32/64) + */ +int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region, + enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type) +{ + switch (region_type) { + case PCI_REGION_CFG: + return hl_access_cfg_region(hdev, addr, val, acc_type); + case PCI_REGION_SRAM: + case PCI_REGION_DRAM: + return hl_access_sram_dram_region(hdev, addr, val, acc_type, + region_type); + default: + return -EFAULT; + } + + return 0; +} + enum hl_device_status hl_device_status(struct hl_device *hdev) { enum hl_device_status status; @@ -107,6 +283,14 @@ static void hpriv_release(struct kref *ref) hdev->is_compute_ctx_active = false; mutex_unlock(&hdev->fpriv_list_lock); + hdev->compute_ctx_in_release = 0; + + /* release the eventfd */ + if (hpriv->notifier_event.eventfd) + eventfd_ctx_put(hpriv->notifier_event.eventfd); + + mutex_destroy(&hpriv->notifier_event.lock); + kfree(hpriv); } @@ -146,10 +330,11 @@ static int hl_device_release(struct inode *inode, struct file *filp) */ hl_release_pending_user_interrupts(hpriv->hdev); - hl_cb_mgr_fini(hdev, &hpriv->cb_mgr); - hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr); + hl_mem_mgr_fini(&hpriv->mem_mgr); hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr); + hdev->compute_ctx_in_release = 1; + if (!hl_hpriv_put(hpriv)) dev_notice(hdev->dev, "User process closed FD but device still in use\n"); @@ -176,6 +361,11 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp) list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_ctrl_list_lock); out: + /* release the eventfd */ + if (hpriv->notifier_event.eventfd) + eventfd_ctx_put(hpriv->notifier_event.eventfd); + + mutex_destroy(&hpriv->notifier_event.lock); put_pid(hpriv->taskpid); kfree(hpriv); @@ -204,17 +394,15 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma) } vm_pgoff = vma->vm_pgoff; - vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff); switch (vm_pgoff & HL_MMAP_TYPE_MASK) { - case HL_MMAP_TYPE_CB: - return hl_cb_mmap(hpriv, vma); - case HL_MMAP_TYPE_BLOCK: + vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff); return hl_hw_block_mmap(hpriv, vma); + case HL_MMAP_TYPE_CB: case HL_MMAP_TYPE_TS_BUFF: - return hl_ts_mmap(hpriv, vma); + return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL); } return -EINVAL; @@ -424,18 +612,25 @@ static int device_early_init(struct hl_device *hdev) goto free_eq_wq; } + hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0); + if (!hdev->pf_wq) { + dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n"); + rc = -ENOMEM; + goto free_ts_free_wq; + } + hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL); if (!hdev->hl_chip_info) { rc = -ENOMEM; - goto free_ts_free_wq; + goto free_pf_wq; } rc = hl_mmu_if_set_funcs(hdev); if (rc) goto free_chip_info; - hl_cb_mgr_init(&hdev->kernel_cb_mgr); + 
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr); hdev->device_reset_work.wq = create_singlethread_workqueue("hl_device_reset"); @@ -464,9 +659,11 @@ static int device_early_init(struct hl_device *hdev) return 0; free_cb_mgr: - hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + hl_mem_mgr_fini(&hdev->kernel_mem_mgr); free_chip_info: kfree(hdev->hl_chip_info); +free_pf_wq: + destroy_workqueue(hdev->pf_wq); free_ts_free_wq: destroy_workqueue(hdev->ts_free_obj_wq); free_eq_wq: @@ -503,10 +700,11 @@ static void device_early_fini(struct hl_device *hdev) mutex_destroy(&hdev->clk_throttling.lock); - hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + hl_mem_mgr_fini(&hdev->kernel_mem_mgr); kfree(hdev->hl_chip_info); + destroy_workqueue(hdev->pf_wq); destroy_workqueue(hdev->ts_free_obj_wq); destroy_workqueue(hdev->eq_wq); destroy_workqueue(hdev->device_reset_work.wq); @@ -703,6 +901,9 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r /* Go over all the queues, release all CS and their jobs */ hl_cs_rollback_all(hdev, skip_wq_flush); + /* flush the MMU prefetch workqueue */ + flush_workqueue(hdev->pf_wq); + /* Release all pending user interrupts, each pending user interrupt * holds a reference to user context */ @@ -847,10 +1048,13 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool put_task_struct(task); } else { - dev_warn(hdev->dev, - "Can't get task struct for PID so giving up on killing process\n"); - mutex_unlock(fd_lock); - return -ETIME; + /* + * If we got here, it means that process was killed from outside the driver + * right after it started looping on fd_list and before get_pid_task, thus + * we don't need to kill it. + */ + dev_dbg(hdev->dev, + "Can't get task struct for user process, assuming process was killed from outside the driver\n"); } } @@ -1062,9 +1266,9 @@ do_reset: if (hard_reset) dev_info(hdev->dev, "Going to reset device\n"); else if (reset_upon_device_release) - dev_info(hdev->dev, "Going to reset device after release by user\n"); + dev_dbg(hdev->dev, "Going to reset device after release by user\n"); else - dev_info(hdev->dev, "Going to reset engines of inference device\n"); + dev_dbg(hdev->dev, "Going to reset engines of inference device\n"); } again: @@ -1270,7 +1474,10 @@ kill_processes: hdev->reset_info.needs_reset = false; - dev_notice(hdev->dev, "Successfully finished resetting the device\n"); + if (hard_reset) + dev_info(hdev->dev, "Successfully finished resetting the device\n"); + else + dev_dbg(hdev->dev, "Successfully finished resetting the device\n"); if (hard_reset) { hdev->reset_info.hard_reset_cnt++; @@ -1323,6 +1530,43 @@ out_err: return rc; } +static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event) +{ + mutex_lock(&notifier_event->lock); + notifier_event->events_mask |= event; + if (notifier_event->eventfd) + eventfd_signal(notifier_event->eventfd, 1); + + mutex_unlock(&notifier_event->lock); +} + +/* + * hl_notifier_event_send_all - notify all user processes via eventfd + * + * @hdev: pointer to habanalabs device structure + * @event: the occurred event + * This function does not return a value. 
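 * [Editor's sketch] Callers broadcast an event bit to every open file
 * descriptor, e.g. (assuming the HL_NOTIFIER_EVENT_TPC_ASSERT bit added by
 * this series' uapi changes; the call site is illustrative):
 *
 *	hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT);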
+ */ +void hl_notifier_event_send_all(struct hl_device *hdev, u64 event) +{ + struct hl_fpriv *hpriv; + + mutex_lock(&hdev->fpriv_list_lock); + + list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) + hl_notifier_event_send(&hpriv->notifier_event, event); + + mutex_unlock(&hdev->fpriv_list_lock); + + /* control device */ + mutex_lock(&hdev->fpriv_ctrl_list_lock); + + list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node) + hl_notifier_event_send(&hpriv->notifier_event, event); + + mutex_unlock(&hdev->fpriv_ctrl_list_lock); +} + /* * hl_device_init - main initialization function for habanalabs device * diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 3262126cc7ca..828a36af5b14 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -18,8 +18,9 @@ static char *extract_fw_ver_from_str(const char *fw_str) { char *str, *fw_ver, *whitespace; + u32 ver_offset; - fw_ver = kmalloc(16, GFP_KERNEL); + fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL); if (!fw_ver) return NULL; @@ -29,9 +30,10 @@ static char *extract_fw_ver_from_str(const char *fw_str) /* Skip the fw- part */ str += 3; + ver_offset = str - fw_str; /* Copy until the next whitespace */ - whitespace = strnstr(str, " ", 15); + whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset); if (!whitespace) goto free_fw_ver; @@ -819,6 +821,54 @@ out: return rc; } +int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data) +{ + struct cpucp_monitor_dump *mon_dump_cpu_addr; + dma_addr_t mon_dump_dma_addr; + struct cpucp_packet pkt = {}; + size_t data_size; + __le32 *src_ptr; + u32 *dst_ptr; + u64 result; + int i, rc; + + data_size = sizeof(struct cpucp_monitor_dump); + mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size, + &mon_dump_dma_addr); + if (!mon_dump_cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for CPU-CP monitor-dump packet\n"); + return -ENOMEM; + } + + memset(mon_dump_cpu_addr, 0, data_size); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = cpu_to_le64(mon_dump_dma_addr); + pkt.data_max_size = cpu_to_le32(data_size); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc); + goto out; + } + + /* result contains the actual size */ + src_ptr = (__le32 *) mon_dump_cpu_addr; + dst_ptr = data; + for (i = 0; i < (data_size / sizeof(u32)); i++) { + *dst_ptr = le32_to_cpu(*src_ptr); + src_ptr++; + dst_ptr++; + } + +out: + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr); + + return rc; +} + int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters) { @@ -1539,7 +1589,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev, le32_to_cpu(dyn_regs->cpu_cmd_status_to_host), status, FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status, - hdev->fw_poll_interval_usec, + hdev->fw_comms_poll_interval_usec, timeout); if (rc) { @@ -1909,7 +1959,7 @@ static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev, * @fwc: the firmware component * @fw_version: fw component's version string */ -static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev, +static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev, enum hl_fw_component fwc, const char 
*fw_version) { @@ -1933,23 +1983,33 @@ static void hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev, VERSION_MAX_LEN); if (preboot_ver && preboot_ver != prop->preboot_ver) { strscpy(btl_ver, prop->preboot_ver, - min((int) (preboot_ver - prop->preboot_ver), - 31)); + min((int) (preboot_ver - prop->preboot_ver), 31)); dev_info(hdev->dev, "%s\n", btl_ver); } preboot_ver = extract_fw_ver_from_str(prop->preboot_ver); if (preboot_ver) { - dev_info(hdev->dev, "preboot version %s\n", - preboot_ver); + char major[8]; + int rc; + + dev_info(hdev->dev, "preboot version %s\n", preboot_ver); + sprintf(major, "%.2s", preboot_ver); kfree(preboot_ver); + + rc = kstrtou32(major, 10, &hdev->fw_major_version); + if (rc) { + dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc); + return rc; + } } break; default: dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); - return; + return -EINVAL; } + + return 0; } /** @@ -2121,9 +2181,10 @@ static int hl_fw_dynamic_load_image(struct hl_device *hdev, goto release_fw; /* read preboot version */ - hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc, + rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc, fw_loader->dynamic_loader.comm_desc.cur_fw_ver); - + if (rc) + goto release_fw; /* update state according to boot stage */ if (cur_fwc == FW_COMP_BOOT_FIT) { @@ -2390,9 +2451,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev, goto protocol_err; /* read preboot version */ - hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT, + return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT, fw_loader->dynamic_loader.comm_desc.cur_fw_ver); - return 0; } /* load boot fit to FW */ diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 1edaf6ab67bd..b0b0f3f89865 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -21,6 +21,7 @@ #include <linux/hashtable.h> #include <linux/debugfs.h> #include <linux/rwsem.h> +#include <linux/eventfd.h> #include <linux/bitfield.h> #include <linux/genalloc.h> #include <linux/sched/signal.h> @@ -61,8 +62,10 @@ #define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */ #define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */ +#define HL_CPUCP_MON_DUMP_TIMEOUT_USEC 10000000 /* 10s */ #define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */ +#define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC 1000000 /* 1s */ #define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */ @@ -394,18 +397,8 @@ enum hl_device_hw_state { * struct hl_mmu_properties - ASIC specific MMU address translation properties. * @start_addr: virtual start address of the memory region. * @end_addr: virtual end address of the memory region. - * @hop0_shift: shift of hop 0 mask. - * @hop1_shift: shift of hop 1 mask. - * @hop2_shift: shift of hop 2 mask. - * @hop3_shift: shift of hop 3 mask. - * @hop4_shift: shift of hop 4 mask. - * @hop5_shift: shift of hop 5 mask. - * @hop0_mask: mask to get the PTE address in hop 0. - * @hop1_mask: mask to get the PTE address in hop 1. - * @hop2_mask: mask to get the PTE address in hop 2. - * @hop3_mask: mask to get the PTE address in hop 3. - * @hop4_mask: mask to get the PTE address in hop 4. - * @hop5_mask: mask to get the PTE address in hop 5. + * @hop_shifts: array holds HOPs shifts. + * @hop_masks: array holds HOPs masks. * @last_mask: mask to get the bit indicating this is the last hop. * @pgt_size: size for page tables. * @page_size: default page size used to allocate memory. 
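The hop0..hop5 shift/mask pairs are collapsed here into indexed arrays (the structure itself changes in the hunk that follows). As a rough sketch of how such per-hop arrays are typically consumed (the helper name and the 64-bit PTE size are our assumptions, though the diff declares a similar hl_mmu_get_hop_pte_phys_addr() later in this header):

/* Illustrative only: locate the PTE for virt_addr inside one hop's table. */
static u64 example_hop_pte_addr(const struct hl_mmu_properties *p, u8 hop_idx,
				u64 hop_table_addr, u64 virt_addr)
{
	u64 pte_idx = (virt_addr & p->hop_masks[hop_idx]) >> p->hop_shifts[hop_idx];

	return hop_table_addr + pte_idx * sizeof(u64); /* assumes 64-bit PTEs */
}

The array form lets a single loop walk an arbitrary number of hops instead of open-coding six shift/mask pairs per ASIC.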
@@ -418,18 +411,8 @@ enum hl_device_hw_state { struct hl_mmu_properties { u64 start_addr; u64 end_addr; - u64 hop0_shift; - u64 hop1_shift; - u64 hop2_shift; - u64 hop3_shift; - u64 hop4_shift; - u64 hop5_shift; - u64 hop0_mask; - u64 hop1_mask; - u64 hop2_mask; - u64 hop3_mask; - u64 hop4_mask; - u64 hop5_mask; + u64 hop_shifts[MMU_HOP_MAX]; + u64 hop_masks[MMU_HOP_MAX]; u64 last_mask; u64 pgt_size; u32 page_size; @@ -486,8 +469,10 @@ struct hl_hints_range { * the device's MMU. * @dram_hints_align_mask: dram va hint addresses alignment mask which is used * for hints validity check. - * device_dma_offset_for_host_access: the offset to add to host DMA addresses - * to enable the device to access them. + * @device_dma_offset_for_host_access: the offset to add to host DMA addresses + * to enable the device to access them. + * @host_base_address: host physical start address for host DMA from device + * @host_end_address: host physical end address for host DMA from device * @max_freq_value: current max clk frequency. * @clk_pll_index: clock PLL index that specifies which PLL determines the clock * we display to the user @@ -528,6 +513,10 @@ struct hl_hints_range { * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security * status reported by FW, bit description can be * found in CPU_BOOT_DEV_STS1 + * @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for + * which the property supports_user_set_page_size is true + * (i.e. the DRAM supports multiple page sizes), otherwise + * it will be equal to dram_page_size. * @collective_first_sob: first sync object available for collective use * @collective_first_mon: first monitor available for collective use * @sync_stream_first_sob: first sync object available for sync stream use @@ -568,6 +557,7 @@ struct hl_hints_range { * @configurable_stop_on_err: is stop-on-error option configurable via debugfs. * @set_max_power_on_device_init: true if need to set max power in F/W on device init. * @supports_user_set_page_size: true if user can set the allocation page size. + * @dma_mask: the dma mask to be set for this device */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; @@ -599,6 +589,8 @@ struct asic_fixed_properties { u64 cb_va_end_addr; u64 dram_hints_align_mask; u64 device_dma_offset_for_host_access; + u64 host_base_address; + u64 host_end_address; u64 max_freq_value; u32 clk_pll_index; u32 mmu_pgt_size; @@ -626,6 +618,7 @@ struct asic_fixed_properties { u32 fw_bootfit_cpu_boot_dev_sts1; u32 fw_app_cpu_boot_dev_sts0; u32 fw_app_cpu_boot_dev_sts1; + u32 device_mem_alloc_default_page_size; u16 collective_first_sob; u16 collective_first_mon; u16 sync_stream_first_sob; @@ -654,6 +647,7 @@ struct asic_fixed_properties { u8 configurable_stop_on_err; u8 set_max_power_on_device_init; u8 supports_user_set_page_size; + u8 dma_mask; }; /** @@ -711,85 +705,102 @@ struct hl_cs_compl { */ /** - * struct hl_cb_mgr - describes a Command Buffer Manager. - * @cb_lock: protects cb_handles. - * @cb_handles: an idr to hold all command buffer handles. - */ -struct hl_cb_mgr { - spinlock_t cb_lock; - struct idr cb_handles; /* protected by cb_lock */ -}; - -/** - * struct hl_ts_mgr - describes the timestamp registration memory manager. - * @ts_lock: protects ts_handles. - * @ts_handles: an idr to hold all ts bufferes handles. - */ -struct hl_ts_mgr { - spinlock_t ts_lock; - struct idr ts_handles; -}; - -/** * struct hl_ts_buff - describes a timestamp buffer. 
- * @refcount: reference counter for usage of the buffer. - * @hdev: pointer to device this buffer belongs to. - * @mmap: true if the buff is currently mapped to user. * @kernel_buff_address: Holds the internal buffer's kernel virtual address. * @user_buff_address: Holds the user buffer's kernel virtual address. - * @id: the buffer ID. - * @mmap_size: Holds the buffer size that was mmaped. * @kernel_buff_size: Holds the internal kernel buffer size. - * @user_buff_size: Holds the user buffer size. */ struct hl_ts_buff { - struct kref refcount; - struct hl_device *hdev; - atomic_t mmap; void *kernel_buff_address; void *user_buff_address; - u32 id; - u32 mmap_size; u32 kernel_buff_size; - u32 user_buff_size; +}; + +struct hl_mmap_mem_buf; + +/** + * struct hl_mem_mgr - describes unified memory manager for mappable memory chunks. + * @dev: back pointer to the owning device + * @lock: protects handles + * @handles: an idr holding all active handles to the memory buffers in the system. + */ +struct hl_mem_mgr { + struct device *dev; + spinlock_t lock; + struct idr handles; +}; + +/** + * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior + * @topic: string identifier used for logging + * @mem_id: memory type identifier, embedded in the handle and used to identify + * the memory type by handle. + * @alloc: callback executed on buffer allocation, shall allocate the memory, + * set it under buffer private, and set mappable size. + * @mmap: callback executed on mmap, must map the buffer to vma + * @release: callback executed on release, must free the resources used by the buffer + */ +struct hl_mmap_mem_buf_behavior { + const char *topic; + u64 mem_id; + + int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args); + int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args); + void (*release)(struct hl_mmap_mem_buf *buf); +}; + +/** + * struct hl_mmap_mem_buf - describes a single unified memory buffer + * @behavior: buffer behavior + * @mmg: back pointer to the unified memory manager + * @refcount: reference counter for buffer users + * @private: pointer to buffer behavior private data + * @mmap: atomic boolean indicating whether or not the buffer is mapped right now + * @real_mapped_size: the actual size of buffer mapped, after part of it may be released, + * may change at runtime. + * @mappable_size: the original mappable size of the buffer, does not change after + * the allocation. + * @handle: the buffer id in mmg handles store + */ +struct hl_mmap_mem_buf { + struct hl_mmap_mem_buf_behavior *behavior; + struct hl_mem_mgr *mmg; + struct kref refcount; + void *private; + atomic_t mmap; + u64 real_mapped_size; + u64 mappable_size; + u64 handle; }; /** * struct hl_cb - describes a Command Buffer. - * @refcount: reference counter for usage of the CB. * @hdev: pointer to device this CB belongs to. * @ctx: pointer to the CB owner's context. - * @lock: spinlock to protect mmap flows. + * @buf: back pointer to the parent mappable memory buffer * @debugfs_list: node in debugfs list of command buffers. * @pool_list: node in pool list of command buffers. * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to * the device's MMU. - * @id: the CB's ID. * @kernel_address: Holds the CB's kernel virtual address. * @bus_address: Holds the CB's DMA address. - * @mmap_size: Holds the CB's size that was mmaped. * @size: holds the CB's size. * @cs_cnt: holds number of CS that this CB participates in. 
- * @mmap: true if the CB is currently mmaped to user. * @is_pool: true if CB was acquired from the pool, false otherwise. * @is_internal: internaly allocated * @is_mmu_mapped: true if the CB is mapped to the device's MMU. */ struct hl_cb { - struct kref refcount; struct hl_device *hdev; struct hl_ctx *ctx; - spinlock_t lock; + struct hl_mmap_mem_buf *buf; struct list_head debugfs_list; struct list_head pool_list; struct list_head va_block_list; - u64 id; void *kernel_address; dma_addr_t bus_address; - u32 mmap_size; u32 size; atomic_t cs_cnt; - u8 mmap; u8 is_pool; u8 is_internal; u8 is_mmu_mapped; @@ -935,12 +946,12 @@ struct hl_user_interrupt { * struct timestamp_reg_free_node - holds the timestamp registration free objects node * @free_objects_node: node in the list free_obj_jobs * @cq_cb: pointer to cq command buffer to be freed - * @ts_buff: pointer to timestamp buffer to be freed + * @buf: pointer to timestamp buffer to be freed */ struct timestamp_reg_free_node { struct list_head free_objects_node; struct hl_cb *cq_cb; - struct hl_ts_buff *ts_buff; + struct hl_mmap_mem_buf *buf; }; /* struct timestamp_reg_work_obj - holds the timestamp registration free objects job @@ -957,8 +968,8 @@ struct timestamp_reg_work_obj { }; /* struct timestamp_reg_info - holds the timestamp registration related data. - * @ts_buff: pointer to the timestamp buffer which include both user/kernel buffers. - * relevant only when doing timestamps records registration. + * @buf: pointer to the timestamp buffer which include both user/kernel buffers. + * relevant only when doing timestamps records registration. * @cq_cb: pointer to CQ counter CB. * @timestamp_kernel_addr: timestamp handle address, where to set timestamp * relevant only when doing timestamps records @@ -969,7 +980,7 @@ struct timestamp_reg_work_obj { * allocating records dynamically. */ struct timestamp_reg_info { - struct hl_ts_buff *ts_buff; + struct hl_mmap_mem_buf *buf; struct hl_cb *cq_cb; u64 *timestamp_kernel_addr; u8 in_use; @@ -1068,6 +1079,15 @@ enum div_select_defs { DIV_SEL_DIVIDED_PLL = 3, }; +enum debugfs_access_type { + DEBUGFS_READ8, + DEBUGFS_WRITE8, + DEBUGFS_READ32, + DEBUGFS_WRITE32, + DEBUGFS_READ64, + DEBUGFS_WRITE64, +}; + enum pci_region { PCI_REGION_CFG, PCI_REGION_SRAM, @@ -1229,6 +1249,7 @@ struct fw_load_mgr { * its implementation is not trivial when the driver * is loaded in simulation mode (not upstreamed). * @scrub_device_mem: Scrub device memory given an address and size + * @scrub_device_dram: Scrub the dram memory of the device. * @get_int_queue_base: get the internal queue base address. * @test_queues: run simple test on all queues for sanity check. * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool. @@ -1236,18 +1257,14 @@ struct fw_load_mgr { * @asic_dma_pool_free: free small DMA allocation from pool. * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool. * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool. - * @hl_dma_unmap_sg: DMA unmap scatter-gather list. + * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table. * @cs_parser: parse Command Submission. - * @asic_dma_map_sg: DMA map scatter-gather list. + * @asic_dma_map_sgtable: DMA map scatter-gather table. * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB. * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it. * @update_eq_ci: update event queue CI. * @context_switch: called upon ASID context switch. 
* @restore_phase_topology: clear all SOBs amd MONs. - * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory. - * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory. - * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory. - * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory. * @debugfs_read_dma: debug interface for reading up to 2MB from the device's * internal memory via DMA engine. * @add_device_attr: add ASIC specific device attributes. @@ -1257,8 +1274,8 @@ struct fw_load_mgr { * @write_pte: write MMU page table entry to DRAM. * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft * (L1 only) or hard (L0 & L1) flush. - * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with - * ASID-VA-size mask. + * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with ASID-VA-size mask. + * @mmu_prefetch_cache_range: pre-fetch specific MMU STLB cache lines with ASID-VA-size mask. * @send_heartbeat: send is-alive packet to CPU-CP and verify response. * @debug_coresight: perform certain actions on Coresight for debugging. * @is_device_idle: return true if device is idle, false otherwise. @@ -1267,6 +1284,7 @@ struct fw_load_mgr { * @hw_queues_unlock: release H/W queues lock. * @get_pci_id: retrieve PCI ID. * @get_eeprom_data: retrieve EEPROM data from F/W. + * @get_monitor_dump: retrieve monitor registers dump from F/W. * @send_cpu_message: send message to F/W. If the message is timedout, the * driver will eventually reset the device. The timeout can * be determined by the calling function or it can be 0 and @@ -1289,8 +1307,6 @@ struct fw_load_mgr { * @gen_wait_cb: Generate a wait CB. * @reset_sob: Reset a SOB. * @reset_sob_group: Reset SOB group - * @set_dma_mask_from_fw: set the DMA mask in the driver according to the - * firmware configuration * @get_device_time: Get the device time. * @collective_wait_init_cs: Generate collective master/slave packets * and place them in the relevant cs jobs @@ -1319,6 +1335,9 @@ struct fw_load_mgr { * @get_stream_master_qid_arr: get pointer to stream masters QID array * @is_valid_dram_page_size: return true if page size is supported in device * memory allocation, otherwise false. 
+ * @get_valid_dram_page_orders: get valid device memory allocation page orders + * @access_dev_mem: access device memory + * @set_dram_bar_base: set the base of the DRAM BAR */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -1342,6 +1361,7 @@ struct hl_asic_funcs { void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, void *cpu_addr, dma_addr_t dma_handle); int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size); + int (*scrub_device_dram)(struct hl_device *hdev, u64 val); void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len); int (*test_queues)(struct hl_device *hdev); @@ -1353,12 +1373,11 @@ struct hl_asic_funcs { size_t size, dma_addr_t *dma_handle); void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev, size_t size, void *vaddr); - void (*hl_dma_unmap_sg)(struct hl_device *hdev, - struct scatterlist *sgl, int nents, + void (*hl_dma_unmap_sgtable)(struct hl_device *hdev, + struct sg_table *sgt, enum dma_data_direction dir); int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser); - int (*asic_dma_map_sg)(struct hl_device *hdev, - struct scatterlist *sgl, int nents, + int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir); u32 (*get_dma_desc_list_size)(struct hl_device *hdev, struct sg_table *sgt); @@ -1369,14 +1388,6 @@ struct hl_asic_funcs { void (*update_eq_ci)(struct hl_device *hdev, u32 val); int (*context_switch)(struct hl_device *hdev, u32 asid); void (*restore_phase_topology)(struct hl_device *hdev); - int (*debugfs_read32)(struct hl_device *hdev, u64 addr, - bool user_address, u32 *val); - int (*debugfs_write32)(struct hl_device *hdev, u64 addr, - bool user_address, u32 val); - int (*debugfs_read64)(struct hl_device *hdev, u64 addr, - bool user_address, u64 *val); - int (*debugfs_write64)(struct hl_device *hdev, u64 addr, - bool user_address, u64 val); int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr); void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp, @@ -1391,6 +1402,7 @@ struct hl_asic_funcs { u32 flags); int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, u32 flags, u32 asid, u64 va, u64 size); + int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size); int (*send_heartbeat)(struct hl_device *hdev); int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data); bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr, @@ -1399,8 +1411,8 @@ struct hl_asic_funcs { void (*hw_queues_lock)(struct hl_device *hdev); void (*hw_queues_unlock)(struct hl_device *hdev); u32 (*get_pci_id)(struct hl_device *hdev); - int (*get_eeprom_data)(struct hl_device *hdev, void *data, - size_t max_size); + int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size); + int (*get_monitor_dump)(struct hl_device *hdev, void *data); int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, u16 len, u32 timeout, u64 *result); int (*pci_bars_map)(struct hl_device *hdev); @@ -1421,7 +1433,6 @@ struct hl_asic_funcs { struct hl_gen_wait_properties *prop); void (*reset_sob)(struct hl_device *hdev, void *data); void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group); - void (*set_dma_mask_from_fw)(struct hl_device *hdev); u64 (*get_device_time)(struct hl_device *hdev); int (*collective_wait_init_cs)(struct hl_cs *cs); int (*collective_wait_create_jobs)(struct hl_device *hdev, 
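The scatter-gather callbacks above now take a whole sg_table instead of a raw scatterlist plus nents. A minimal sketch of what a common wrapper such as hl_dma_map_sgtable() (declared further down in this header) is expected to do, assuming the standard DMA-mapping API and the device_dma_offset_for_host_access property documented earlier; the offset fixup is our illustration, not necessarily the driver's exact code:

/* Sketch: map an sg_table, then shift the DMA addresses by the offset the
 * device needs in order to reach host memory.
 */
static int example_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int rc, i;

	rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
	if (rc)
		return rc;

	for_each_sgtable_dma_sg(sgt, sg, i)
		sg->dma_address += hdev->asic_prop.device_dma_offset_for_host_access;

	return 0;
}

Passing the sg_table also lets callers use for_each_sgtable_dma_sg(), which iterates the DMA-mapped entries rather than the original CPU entries.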
@@ -1445,6 +1456,12 @@ struct hl_asic_funcs { void (*set_pci_memory_regions)(struct hl_device *hdev); u32* (*get_stream_master_qid_arr)(void); bool (*is_valid_dram_page_size)(u32 page_size); + int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop, + u32 page_size, u32 *real_page_size, bool is_dram_addr); + void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info); + int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region, + enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type); + u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr); }; @@ -1915,6 +1932,18 @@ struct hl_debug_params { bool enable; }; +/** + * struct hl_notifier_event - holds the notifier data structure + * @eventfd: the event file descriptor used to raise notifications + * @lock: mutex lock to protect the notifier data flows + * @events_mask: bitmap of pending events + */ +struct hl_notifier_event { + struct eventfd_ctx *eventfd; + struct mutex lock; + u64 events_mask; +}; + /* * FILE PRIVATE STRUCTURE */ @@ -1926,25 +1955,25 @@ struct hl_debug_params { * @taskpid: current process ID. * @ctx: current executing context. TODO: remove for multiple ctx per process * @ctx_mgr: context manager to handle multiple contexts for this FD. - * @cb_mgr: command buffer manager to handle multiple buffers for this FD. - * @ts_mem_mgr: timestamp registration manager for alloc/free/map timestamp buffers. + * @mem_mgr: manager descriptor for memory exportable via mmap + * @notifier_event: notifier eventfd towards user process * @debugfs_list: list of relevant ASIC debugfs. * @dev_node: node in the device list of file private data * @refcount: number of related contexts. * @restore_phase_mutex: lock for context switch and restore phase. */ struct hl_fpriv { - struct hl_device *hdev; - struct file *filp; - struct pid *taskpid; - struct hl_ctx *ctx; - struct hl_ctx_mgr ctx_mgr; - struct hl_cb_mgr cb_mgr; - struct hl_ts_mgr ts_mem_mgr; - struct list_head debugfs_list; - struct list_head dev_node; - struct kref refcount; - struct mutex restore_phase_mutex; + struct hl_device *hdev; + struct file *filp; + struct pid *taskpid; + struct hl_ctx *ctx; + struct hl_ctx_mgr ctx_mgr; + struct hl_mem_mgr mem_mgr; + struct hl_notifier_event notifier_event; + struct list_head debugfs_list; + struct list_head dev_node; + struct kref refcount; + struct mutex restore_phase_mutex; }; @@ -1992,12 +2021,14 @@ struct hl_debugfs_entry { * @userptr_spinlock: protects userptr_list. * @ctx_mem_hash_list: list of available contexts with MMU mappings. * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list. - * @blob_desc: descriptor of blob + * @data_dma_blob_desc: data DMA descriptor of blob. + * @mon_dump_blob_desc: monitor dump descriptor of blob. * @state_dump: data of the system states in case of a bad cs. * @state_dump_sem: protects state_dump. * @addr: next address to read/write from/to in read/write32. * @mmu_addr: next virtual address to translate to physical address in mmu_show. * @userptr_lookup: the target user ptr to look up for on demand. + * @memory_scrub_val: the value to which the dram will be scrubbed using the scrub_device_dram cb * @mmu_asid: ASID to use while translating in mmu_show. * @state_dump_head: index of the latest state dump * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read. 
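With hl_notifier_event embedded in every hl_fpriv, a user process consumes notifications roughly as follows. This is a hypothetical userspace sketch: the HL_INFO_* opcodes and hl_info_args fields appear in this series' uapi, but the exact structure layout and header path should be treated as assumptions:

/* Hypothetical flow: register an eventfd, block on it, then drain the
 * accumulated events bitmap via HL_INFO_GET_EVENTS.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header; path is an assumption */

static uint64_t example_wait_for_events(int dev_fd)
{
	struct hl_info_args args = {0};
	uint64_t cnt, mask = 0;
	int efd = eventfd(0, 0);

	args.op = HL_INFO_REGISTER_EVENTFD;
	args.eventfd = efd;
	ioctl(dev_fd, HL_IOCTL_INFO, &args);

	read(efd, &cnt, sizeof(cnt));	/* blocks until the driver signals */

	args.op = HL_INFO_GET_EVENTS;
	args.return_pointer = (uintptr_t)&mask;
	args.return_size = sizeof(mask);
	ioctl(dev_fd, HL_IOCTL_INFO, &args);

	close(efd);
	return mask;	/* events_mask is cleared by the driver once read */
}

Note how events_info() in habanalabs_ioctl.c further down clears events_mask under the notifier lock, so each HL_INFO_GET_EVENTS call returns only the events accumulated since the previous call.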
@@ -2021,12 +2052,14 @@ struct hl_dbg_device_entry { spinlock_t userptr_spinlock; struct list_head ctx_mem_hash_list; spinlock_t ctx_mem_hash_spinlock; - struct debugfs_blob_wrapper blob_desc; + struct debugfs_blob_wrapper data_dma_blob_desc; + struct debugfs_blob_wrapper mon_dump_blob_desc; char *state_dump[HL_STATE_DUMP_HIST_LEN]; struct rw_semaphore state_dump_sem; u64 addr; u64 mmu_addr; u64 userptr_lookup; + u64 memory_scrub_val; u32 mmu_asid; u32 state_dump_head; u8 i2c_bus; @@ -2442,6 +2475,24 @@ struct hl_mmu_funcs { }; /** + * struct hl_prefetch_work - prefetch work structure handler + * @pf_work: actual work struct. + * @ctx: compute context. + * @va: virtual address to pre-fetch. + * @size: pre-fetch size. + * @flags: operation flags. + * @asid: ASID for maintenance operation. + */ +struct hl_prefetch_work { + struct work_struct pf_work; + struct hl_ctx *ctx; + u64 va; + u64 size; + u32 flags; + u32 asid; +}; + +/* * number of user contexts allowed to call wait_for_multi_cs ioctl in * parallel */ @@ -2517,37 +2568,50 @@ struct hl_clk_throttle { }; /** - * struct last_error_session_info - info about last session in which CS timeout or - * razwi error occurred. - * @open_dev_timestamp: device open timestamp. - * @cs_timeout_timestamp: CS timeout timestamp. - * @razwi_timestamp: razwi timestamp. - * @cs_write_disable: if set writing to CS parameters in the structure is disabled so the - * first (root cause) CS timeout will not be overwritten. - * @razwi_write_disable: if set writing to razwi parameters in the structure is disabled so the - * first (root cause) razwi will not be overwritten. - * @cs_timeout_seq: CS timeout sequence number. - * @razwi_addr: address that caused razwi. - * @razwi_engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does - * not have engine id it will be set to U16_MAX. - * @razwi_engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible - * engines which one them caused the razwi. In that case, it will contain the - * second possible engine id, otherwise it will be set to U16_MAX. - * @razwi_non_engine_initiator: in case the initiator of the razwi does not have engine id. - * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX. + * struct cs_timeout_info - info about the last CS timeout that occurred. + * @timestamp: CS timeout timestamp. + * @write_disable: if set, writing to CS parameters in the structure is disabled so + * the first (root cause) CS timeout will not be overwritten. + * @seq: CS timeout sequence number. + */ +struct cs_timeout_info { + ktime_t timestamp; + atomic_t write_disable; + u64 seq; +}; + +/** + * struct razwi_info - info about the last razwi error that occurred. + * @timestamp: razwi timestamp. + * @write_disable: if set, writing to razwi parameters in the structure is disabled so the + * first (root cause) razwi will not be overwritten. + * @addr: address that caused razwi. + * @engine_id_1: engine id of the razwi initiator; if it was initiated by an engine that does + * not have an engine id, it will be set to U16_MAX. + * @engine_id_2: second engine id of the razwi initiator. It might happen that a razwi has 2 + * possible engines, one of which caused the razwi. In that case, it will contain + * the second possible engine id, otherwise it will be set to U16_MAX. + * @non_engine_initiator: in case the initiator of the razwi does not have an engine id. + * @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX. 
+ */ +struct razwi_info { + ktime_t timestamp; + atomic_t write_disable; + u64 addr; + u16 engine_id_1; + u16 engine_id_2; + u8 non_engine_initiator; + u8 type; +}; + +/** + * struct last_error_session_info - info about the last session errors that occurred. + * @cs_timeout: last CS timeout error information. + * @razwi: last razwi error information. */ struct last_error_session_info { - ktime_t open_dev_timestamp; - ktime_t cs_timeout_timestamp; - ktime_t razwi_timestamp; - atomic_t cs_write_disable; - atomic_t razwi_write_disable; - u64 cs_timeout_seq; - u64 razwi_addr; - u16 razwi_engine_id_1; - u16 razwi_engine_id_2; - u8 razwi_non_engine_initiator; - u8 razwi_type; + struct cs_timeout_info cs_timeout; + struct razwi_info razwi; }; /** @@ -2614,11 +2678,12 @@ struct hl_reset_info { * context. * @eq_wq: work queue of event queue for executing work in process context. * @ts_free_obj_wq: work queue for timestamp registration objects release. + * @pf_wq: work queue for MMU pre-fetch operations. * @kernel_ctx: Kernel driver context structure. * @kernel_queues: array of hl_hw_queue. * @cs_mirror_list: CS mirror list for TDR. * @cs_mirror_lock: protects cs_mirror_list. - * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs. + * @kernel_mem_mgr: memory manager for memory buffers with lifespan of driver. * @event_queue: event queue for IRQ from CPU-CP. * @dma_pool: DMA pool for small allocations. * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address. @@ -2656,9 +2721,10 @@ struct hl_reset_info { * @state_dump_specs: constants and dictionaries needed to dump system state. * @multi_cs_completion: array of multi-CS completion. * @clk_throttling: holds information about current/previous clock throttling events - * @reset_info: holds current device reset information. * @last_error: holds information about last session in which CS timeout or razwi error occurred. + * @reset_info: holds current device reset information. * @stream_master_qid_arr: pointer to array with QIDs of master streams. + * @fw_major_version: major version of the currently loaded preboot * @dram_used_mem: current DRAM memory consumption. * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This @@ -2678,6 +2744,9 @@ struct hl_reset_info { * session. * @open_counter: number of successful device open operations. * @fw_poll_interval_usec: FW status poll interval in usec. + * used for CPU boot status + * @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec. + * used for COMMs protocol cmds (COMMS_STS_*) * @card_type: Various ASICs have several card types. This indicates the card * type of the current device. * @major: habanalabs kernel driver major. @@ -2686,6 +2755,7 @@ struct hl_reset_info { * @id_control: minor of the control device * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit * addresses. + * @is_in_dram_scrub: true if dram scrub operation is ongoing. * @disabled: is device disabled. * @late_init_done: is late init stage was done during initialization. * @hwmon_initialized: is H/W monitor sensors was initialized. @@ -2699,7 +2769,6 @@ struct hl_reset_info { * huge pages. * @init_done: is the initialization of the device done. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) - * @dma_mask: the dma mask that was set for this device * @in_debug: whether the device is in a state where the profiling/tracing infrastructure * can be used. 
This indication is needed because in some ASICs we need to do * specific operations to enable that infrastructure. @@ -2721,6 +2790,8 @@ struct hl_reset_info { * cases where Linux was not loaded to device CPU * @supports_wait_for_multi_cs: true if wait for multi CS is supported * @is_compute_ctx_active: Whether there is an active compute context executing. + * @compute_ctx_in_release: true if the current compute context is being released. + * @supports_mmu_prefetch: true if prefetch is supported, otherwise false. */ struct hl_device { struct pci_dev *pdev; @@ -2742,11 +2813,12 @@ struct hl_device { struct workqueue_struct **cq_wq; struct workqueue_struct *eq_wq; struct workqueue_struct *ts_free_obj_wq; + struct workqueue_struct *pf_wq; struct hl_ctx *kernel_ctx; struct hl_hw_queue *kernel_queues; struct list_head cs_mirror_list; spinlock_t cs_mirror_lock; - struct hl_cb_mgr kernel_cb_mgr; + struct hl_mem_mgr kernel_mem_mgr; struct hl_eq event_queue; struct dma_pool *dma_pool; void *cpu_accessible_dma_mem; @@ -2797,6 +2869,7 @@ struct hl_device { struct hl_reset_info reset_info; u32 *stream_master_qid_arr; + u32 fw_major_version; atomic64_t dram_used_mem; u64 timeout_jiffies; u64 max_power; @@ -2807,12 +2880,15 @@ struct hl_device { u64 open_counter; u64 fw_poll_interval_usec; ktime_t last_successful_open_ktime; + u64 fw_comms_poll_interval_usec; + enum cpucp_card_types card_type; u32 major; u32 high_pll; u16 id; u16 id_control; u16 cpu_pci_msb_addr; + u8 is_in_dram_scrub; u8 disabled; u8 late_init_done; u8 hwmon_initialized; @@ -2823,7 +2899,6 @@ struct hl_device { u8 pmmu_huge_range; u8 init_done; u8 device_cpu_disabled; - u8 dma_mask; u8 in_debug; u8 cdev_sysfs_created; u8 stop_on_err; @@ -2839,6 +2914,8 @@ struct hl_device { u8 supports_wait_for_multi_cs; u8 stream_master_qid_arr_size; u8 is_compute_ctx_active; + u8 compute_ctx_in_release; + u8 supports_mmu_prefetch; /* Parameters for bring-up */ u64 nic_ports_mask; @@ -2971,6 +3048,14 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size, return ((address <= range_end_address) && (range_start_address <= end_address)); } +uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr); +int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir); +void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, + enum dma_data_direction dir); +int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val, + enum debugfs_access_type acc_type); +int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region, + enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type); int hl_device_open(struct inode *inode, struct file *filp); int hl_device_open_ctrl(struct inode *inode, struct file *filp); bool hl_device_operational(struct hl_device *hdev, @@ -3013,7 +3098,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv); void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx); int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx); void hl_ctx_do_release(struct kref *ref); -void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx); +void hl_ctx_get(struct hl_ctx *ctx); int hl_ctx_put(struct hl_ctx *ctx); struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev); struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq); @@ -3034,23 +3119,21 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization); int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor 
*sensors_arr); +void hl_notifier_event_send_all(struct hl_device *hdev, u64 event); + int hl_sysfs_init(struct hl_device *hdev); void hl_sysfs_fini(struct hl_device *hdev); int hl_hwmon_init(struct hl_device *hdev); void hl_hwmon_fini(struct hl_device *hdev); -int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, +int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg, struct hl_ctx *ctx, u32 cb_size, bool internal_cb, bool map_cb, u64 *handle); -int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle); -int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); +int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle); int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); -struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, - u32 handle); +struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle); void hl_cb_put(struct hl_cb *cb); -void hl_cb_mgr_init(struct hl_cb_mgr *mgr); -void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr); struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, bool internal_cb); int hl_cb_pool_init(struct hl_device *hdev); @@ -3104,6 +3187,8 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx); void hl_mmu_ctx_fini(struct hl_ctx *ctx); int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte); +int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop, + u32 page_size, u32 *real_page_size, bool is_dram_addr); int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte); int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, @@ -3112,6 +3197,7 @@ int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size); int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags); int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard, u32 flags, u32 asid, u64 va, u64 size); +int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size); u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte); u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop, u8 hop_idx, u64 hop_addr, u64 virt_addr); @@ -3149,6 +3235,7 @@ int hl_fw_cpucp_handshake(struct hl_device *hdev, u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg, u32 boot_err1_reg); int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size); +int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data); int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters); int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, @@ -3224,11 +3311,19 @@ __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset, const char *format, ...); char *hl_format_as_binary(char *buf, size_t buf_len, u32 n); const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type); -void hl_ts_mgr_init(struct hl_ts_mgr *mgr); -void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr); -int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); -struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr, u32 handle); -void hl_ts_put(struct hl_ts_buff *buff); + +void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg); +void hl_mem_mgr_fini(struct hl_mem_mgr *mmg); +int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma, + void *args); +struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr 
*mmg, + u64 handle); +int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle); +int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf); +struct hl_mmap_mem_buf * +hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg, + struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp, + void *args); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index ca404ed9d9a7..37edb69a7255 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -134,13 +134,14 @@ int hl_device_open(struct inode *inode, struct file *filp) hpriv->hdev = hdev; filp->private_data = hpriv; hpriv->filp = filp; + + mutex_init(&hpriv->notifier_event.lock); mutex_init(&hpriv->restore_phase_mutex); kref_init(&hpriv->refcount); nonseekable_open(inode, filp); - hl_cb_mgr_init(&hpriv->cb_mgr); hl_ctx_mgr_init(&hpriv->ctx_mgr); - hl_ts_mgr_init(&hpriv->ts_mem_mgr); + hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr); hpriv->taskpid = get_task_pid(current, PIDTYPE_PID); @@ -150,7 +151,28 @@ int hl_device_open(struct inode *inode, struct file *filp) dev_err_ratelimited(hdev->dev, "Can't open %s because it is %s\n", dev_name(hdev->dev), hdev->status[status]); - rc = -EPERM; + + if (status == HL_DEVICE_STATUS_IN_RESET) + rc = -EAGAIN; + else + rc = -EPERM; + + goto out_err; + } + + if (hdev->is_in_dram_scrub) { + dev_dbg_ratelimited(hdev->dev, + "Can't open %s during dram scrub\n", + dev_name(hdev->dev)); + rc = -EAGAIN; + goto out_err; + } + + if (hdev->compute_ctx_in_release) { + dev_dbg_ratelimited(hdev->dev, + "Can't open %s because another user is still releasing it\n", + dev_name(hdev->dev)); + rc = -EAGAIN; goto out_err; } @@ -173,8 +195,8 @@ int hl_device_open(struct inode *inode, struct file *filp) hl_debugfs_add_file(hpriv); - atomic_set(&hdev->last_error.cs_write_disable, 0); - atomic_set(&hdev->last_error.razwi_write_disable, 0); + atomic_set(&hdev->last_error.cs_timeout.write_disable, 0); + atomic_set(&hdev->last_error.razwi.write_disable, 0); hdev->open_counter++; hdev->last_successful_open_jif = jiffies; @@ -184,11 +206,11 @@ int hl_device_open(struct inode *inode, struct file *filp) out_err: mutex_unlock(&hdev->fpriv_list_lock); - hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); - hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr); + hl_mem_mgr_fini(&hpriv->mem_mgr); hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); filp->private_data = NULL; mutex_destroy(&hpriv->restore_phase_mutex); + mutex_destroy(&hpriv->notifier_event.lock); put_pid(hpriv->taskpid); kfree(hpriv); @@ -222,9 +244,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) hpriv->hdev = hdev; filp->private_data = hpriv; hpriv->filp = filp; + + mutex_init(&hpriv->notifier_event.lock); nonseekable_open(inode, filp); - hpriv->taskpid = find_get_pid(current->pid); + hpriv->taskpid = get_task_pid(current, PIDTYPE_PID); mutex_lock(&hdev->fpriv_ctrl_list_lock); @@ -288,6 +312,7 @@ static int fixup_device_params(struct hl_device *hdev) hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type); hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC; + hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC; hdev->stop_on_err = true; hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN; @@ -296,9 +321,6 @@ static int fixup_device_params(struct hl_device *hdev) /* Enable only after the initialization of the device */ hdev->disabled = true; - /* Set default DMA mask to 32 bits */ - 
hdev->dma_mask = 32; - return 0; } diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index c13a3c2a7013..c7864d6bb0a1 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -76,6 +76,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) if (hw_ip.dram_size > PAGE_SIZE) hw_ip.dram_enabled = 1; hw_ip.dram_page_size = prop->dram_page_size; + hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size; hw_ip.num_of_events = prop->num_of_events; memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version, @@ -115,6 +116,23 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate, return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; } +static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + u32 max_size = args->return_size; + u64 events_mask; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((max_size < sizeof(u64)) || (!out)) + return -EINVAL; + + mutex_lock(&hpriv->notifier_event.lock); + events_mask = hpriv->notifier_event.events_mask; + hpriv->notifier_event.events_mask = 0; + mutex_unlock(&hpriv->notifier_event.lock); + + return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0; +} + static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { struct hl_device *hdev = hpriv->hdev; @@ -497,6 +515,8 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args) open_stats_info.last_open_period_ms = jiffies64_to_msecs( hdev->last_open_session_duration_jif); open_stats_info.open_counter = hdev->open_counter; + open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active; + open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release; return copy_to_user(out, &open_stats_info, min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0; @@ -549,7 +569,7 @@ static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *a if ((!max_size) || (!out)) return -EINVAL; - info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp); + info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime); return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; } @@ -564,8 +584,8 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args) if ((!max_size) || (!out)) return -EINVAL; - info.seq = hdev->last_error.cs_timeout_seq; - info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp); + info.seq = hdev->last_error.cs_timeout.seq; + info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp); return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? 
-EFAULT : 0; } @@ -580,16 +600,74 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args) if ((!max_size) || (!out)) return -EINVAL; - info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp); - info.addr = hdev->last_error.razwi_addr; - info.engine_id_1 = hdev->last_error.razwi_engine_id_1; - info.engine_id_2 = hdev->last_error.razwi_engine_id_2; - info.no_engine_id = hdev->last_error.razwi_non_engine_initiator; - info.error_type = hdev->last_error.razwi_type; + info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp); + info.addr = hdev->last_error.razwi.addr; + info.engine_id_1 = hdev->last_error.razwi.engine_id_1; + info.engine_id_2 = hdev->last_error.razwi.engine_id_2; + info.no_engine_id = hdev->last_error.razwi.non_engine_initiator; + info.error_type = hdev->last_error.razwi.type; + + return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; +} + +static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct hl_info_dev_memalloc_page_sizes info = {0}; + struct hl_device *hdev = hpriv->hdev; + u32 max_size = args->return_size; + + if ((!max_size) || (!out)) + return -EINVAL; + + /* + * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2" + * pages (unlike some earlier ASICs that support multiple page sizes). + * For this reason, for all ASICs that do not support multiple page sizes, the function will + * return an empty bitmask indicating that multiple page sizes are not supported. + */ + hdev->asic_funcs->get_valid_dram_page_orders(&info); return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; } +static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + int rc; + + /* check if there is already an eventfd registered for that process */ + mutex_lock(&hpriv->notifier_event.lock); + if (hpriv->notifier_event.eventfd) { + mutex_unlock(&hpriv->notifier_event.lock); + return -EINVAL; + } + + hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd); + if (IS_ERR(hpriv->notifier_event.eventfd)) { + rc = PTR_ERR(hpriv->notifier_event.eventfd); + hpriv->notifier_event.eventfd = NULL; + mutex_unlock(&hpriv->notifier_event.lock); + return rc; + } + + mutex_unlock(&hpriv->notifier_event.lock); + return 0; +} + +static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + mutex_lock(&hpriv->notifier_event.lock); + if (!hpriv->notifier_event.eventfd) { + mutex_unlock(&hpriv->notifier_event.lock); + return -EINVAL; + } + + eventfd_ctx_put(hpriv->notifier_event.eventfd); + hpriv->notifier_event.eventfd = NULL; + mutex_unlock(&hpriv->notifier_event.lock); + return 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -640,6 +718,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_RAZWI_EVENT: return razwi_info(hpriv, args); + case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES: + return dev_mem_alloc_page_sizes_info(hpriv, args); + + case HL_INFO_GET_EVENTS: + return events_info(hpriv, args); + default: break; } @@ -690,6 +774,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_DRAM_PENDING_ROWS: return dram_pending_rows_info(hpriv, args); + case HL_INFO_REGISTER_EVENTFD: + return eventfd_register(hpriv, args); + + case HL_INFO_UNREGISTER_EVENTFD: + return eventfd_unregister(hpriv, args); + default: dev_err(dev, "Invalid request %d\n", 
args->op); rc = -EINVAL; diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c index e2bc128f2291..8500e15ef743 100644 --- a/drivers/misc/habanalabs/common/irq.c +++ b/drivers/misc/habanalabs/common/irq.c @@ -152,11 +152,11 @@ static void hl_ts_free_objects(struct work_struct *work) struct hl_device *hdev = job->hdev; list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) { - dev_dbg(hdev->dev, "About to put refcount to ts_buff (%p) cq_cb(%p)\n", - free_obj->ts_buff, + dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n", + free_obj->buf, free_obj->cq_cb); - hl_ts_put(free_obj->ts_buff); + hl_mmap_mem_buf_put(free_obj->buf); hl_cb_put(free_obj->cq_cb); kfree(free_obj); } @@ -210,7 +210,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi /* Putting the refcount for ts_buff and cq_cb objects will be handled * in workqueue context, just add job to free_list. */ - free_node->ts_buff = pend->ts_reg_info.ts_buff; + free_node->buf = pend->ts_reg_info.buf; free_node->cq_cb = pend->ts_reg_info.cq_cb; list_add(&free_node->free_objects_node, *free_list); @@ -244,7 +244,7 @@ static void handle_user_cq(struct hl_device *hdev, list_for_each_entry_safe(pend, temp_pend, &user_cq->wait_list_head, wait_list_node) { if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) || !pend->cq_kernel_addr) { - if (pend->ts_reg_info.ts_buff) { + if (pend->ts_reg_info.buf) { if (!reg_node_handle_fail) { rc = handle_registration_node(hdev, pend, &ts_reg_free_list_head); @@ -282,10 +282,6 @@ irqreturn_t hl_irq_handler_user_cq(int irq, void *arg) struct hl_user_interrupt *user_cq = arg; struct hl_device *hdev = user_cq->hdev; - dev_dbg(hdev->dev, - "got user completion interrupt id %u", - user_cq->interrupt_id); - /* Handle user cq interrupts registered on all interrupts */ handle_user_cq(hdev, &hdev->common_user_interrupt); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index a13506dd8119..663dd7e589d4 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -41,7 +41,7 @@ static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u return -EINVAL; } } else { - psize = hdev->asic_prop.dram_page_size; + psize = prop->device_mem_alloc_default_page_size; } *page_size = psize; @@ -117,7 +117,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, paddr = gen_pool_alloc(vm->dram_pg_pool, total_size); if (!paddr) { dev_err(hdev->dev, - "failed to allocate %llu contiguous pages with total size of %llu\n", + "Cannot allocate %llu contiguous pages with total size of %llu\n", num_pgs, total_size); return -ENOMEM; } @@ -156,9 +156,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, else phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool, page_size); + if (!phys_pg_pack->pages[i]) { dev_err(hdev->dev, - "Failed to allocate device memory (out of memory)\n"); + "Cannot allocate device memory (out of memory)\n"); rc = -ENOMEM; goto page_err; } @@ -237,19 +238,18 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size, goto pin_err; } - rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, - userptr->sgt->nents, DMA_BIDIRECTIONAL); - if (rc) { - dev_err(hdev->dev, "failed to map sgt with DMA region\n"); - goto dma_map_err; - } - userptr->dma_mapped = true; userptr->dir = DMA_BIDIRECTIONAL; userptr->vm_type = 
VM_TYPE_USERPTR; *p_userptr = userptr; + rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto dma_map_err; + } + return 0; dma_map_err: @@ -900,7 +900,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, * consecutive block. */ total_npages = 0; - for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + for_each_sgtable_dma_sg(userptr->sgt, sg, i) { npages = hl_get_sg_info(sg, &dma_addr); total_npages += npages; @@ -929,7 +929,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, phys_pg_pack->total_size = total_npages * page_size; j = 0; - for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + for_each_sgtable_dma_sg(userptr->sgt, sg, i) { npages = hl_get_sg_info(sg, &dma_addr); /* align down to physical page size and save the offset */ @@ -1102,21 +1102,24 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, * map a device virtual block to these pages and return the start address of * this block. */ -static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, - u64 *device_addr) +static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr) { - struct hl_device *hdev = ctx->hdev; - struct hl_vm *vm = &hdev->vm; struct hl_vm_phys_pg_pack *phys_pg_pack; + enum hl_va_range_type va_range_type = 0; + struct hl_device *hdev = ctx->hdev; struct hl_userptr *userptr = NULL; + u32 handle = 0, va_block_align; struct hl_vm_hash_node *hnode; + struct hl_vm *vm = &hdev->vm; struct hl_va_range *va_range; - enum vm_type *vm_type; + bool is_userptr, do_prefetch; u64 ret_vaddr, hint_addr; - u32 handle = 0, va_block_align; + enum vm_type *vm_type; int rc; - bool is_userptr = args->flags & HL_MEM_USERPTR; - enum hl_va_range_type va_range_type = 0; + + /* set map flags */ + is_userptr = args->flags & HL_MEM_USERPTR; + do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH); /* Assume failure */ *device_addr = 0; @@ -1241,19 +1244,27 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack); if (rc) { - mutex_unlock(&ctx->mmu_lock); - dev_err(hdev->dev, "mapping page pack failed for handle %u\n", - handle); + dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle); goto map_err; } rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV, ctx->asid, ret_vaddr, phys_pg_pack->total_size); + if (rc) + goto map_err; mutex_unlock(&ctx->mmu_lock); + /* + * prefetch is done upon user's request. it is performed in a WQ and so can + * be outside the MMU lock. 
the operation itself is already protected by the mmu lock + */ + if (do_prefetch) { + rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr, + phys_pg_pack->total_size); + if (rc) + goto map_err; + } ret_vaddr += phys_pg_pack->offset; @@ -1272,6 +1283,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, return rc; map_err: + mutex_unlock(&ctx->mmu_lock); + if (add_va_block(hdev, va_range, ret_vaddr, ret_vaddr + phys_pg_pack->total_size - 1)) dev_warn(hdev->dev, @@ -1509,7 +1522,7 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) vma->vm_ops = &hw_block_vm_ops; vma->vm_private_data = lnode; - hl_ctx_get(hdev, ctx); + hl_ctx_get(ctx); rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size); if (rc) { @@ -1819,7 +1832,7 @@ static int export_dmabuf_common(struct hl_ctx *ctx, } hl_dmabuf->ctx = ctx; - hl_ctx_get(hdev, hl_dmabuf->ctx); + hl_ctx_get(hl_dmabuf->ctx); *dmabuf_fd = fd; @@ -2076,164 +2089,34 @@ out: return rc; } -static void ts_buff_release(struct kref *ref) -{ - struct hl_ts_buff *buff; - - buff = container_of(ref, struct hl_ts_buff, refcount); - - vfree(buff->kernel_buff_address); - vfree(buff->user_buff_address); - kfree(buff); -} - -struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr, - u32 handle) -{ - struct hl_ts_buff *buff; - - spin_lock(&mgr->ts_lock); - buff = idr_find(&mgr->ts_handles, handle); - if (!buff) { - spin_unlock(&mgr->ts_lock); - dev_warn(hdev->dev, - "TS buff get failed, no match to handle 0x%x\n", handle); - return NULL; - } - kref_get(&buff->refcount); - spin_unlock(&mgr->ts_lock); - - return buff; -} - -void hl_ts_put(struct hl_ts_buff *buff) +static void ts_buff_release(struct hl_mmap_mem_buf *buf) { - kref_put(&buff->refcount, ts_buff_release); -} - -static void buff_vm_close(struct vm_area_struct *vma) -{ - struct hl_ts_buff *buff = (struct hl_ts_buff *) vma->vm_private_data; - long new_mmap_size; - - new_mmap_size = buff->mmap_size - (vma->vm_end - vma->vm_start); + struct hl_ts_buff *ts_buff = buf->private; - if (new_mmap_size > 0) { - buff->mmap_size = new_mmap_size; - return; - } - - atomic_set(&buff->mmap, 0); - hl_ts_put(buff); - vma->vm_private_data = NULL; + vfree(ts_buff->kernel_buff_address); + vfree(ts_buff->user_buff_address); + kfree(ts_buff); } -static const struct vm_operations_struct ts_buff_vm_ops = { - .close = buff_vm_close -}; - -int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) +static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args) { - struct hl_device *hdev = hpriv->hdev; - struct hl_ts_buff *buff; - u32 handle, user_buff_size; - int rc; - - /* We use the page offset to hold the idr and thus we need to clear - * it before doing the mmap itself - */ - handle = vma->vm_pgoff; - vma->vm_pgoff = 0; - - buff = hl_ts_get(hdev, &hpriv->ts_mem_mgr, handle); - if (!buff) { - dev_err(hdev->dev, - "TS buff mmap failed, no match to handle 0x%x\n", handle); - return -EINVAL; - } - - /* Validation check */ - user_buff_size = vma->vm_end - vma->vm_start; - if (user_buff_size != ALIGN(buff->user_buff_size, PAGE_SIZE)) { - dev_err(hdev->dev, - "TS buff mmap failed, mmap size 0x%x != 0x%x buff size\n", - user_buff_size, ALIGN(buff->user_buff_size, PAGE_SIZE)); - rc = -EINVAL; - goto put_buff; - } - -#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK - if (!access_ok(VERIFY_WRITE, - (void __user *) (uintptr_t) vma->vm_start, user_buff_size)) { -#else - if (!access_ok((void __user *) (uintptr_t) vma->vm_start, - 
user_buff_size)) { -#endif - dev_err(hdev->dev, - "user pointer is invalid - 0x%lx\n", - vma->vm_start); - - rc = -EINVAL; - goto put_buff; - } + struct hl_ts_buff *ts_buff = buf->private; - if (atomic_cmpxchg(&buff->mmap, 0, 1)) { - dev_err(hdev->dev, "TS buff memory mmap failed, already mmaped to user\n"); - rc = -EINVAL; - goto put_buff; - } - - vma->vm_ops = &ts_buff_vm_ops; - vma->vm_private_data = buff; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE; - rc = remap_vmalloc_range(vma, buff->user_buff_address, 0); - if (rc) { - atomic_set(&buff->mmap, 0); - goto put_buff; - } - - buff->mmap_size = buff->user_buff_size; - vma->vm_pgoff = handle; - - return 0; - -put_buff: - hl_ts_put(buff); - return rc; -} - -void hl_ts_mgr_init(struct hl_ts_mgr *mgr) -{ - spin_lock_init(&mgr->ts_lock); - idr_init(&mgr->ts_handles); + return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0); } -void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr) -{ - struct hl_ts_buff *buff; - struct idr *idp; - u32 id; - - idp = &mgr->ts_handles; - - idr_for_each_entry(idp, buff, id) { - if (kref_put(&buff->refcount, ts_buff_release) != 1) - dev_err(hdev->dev, "TS buff handle %d for CTX is still alive\n", - id); - } - - idr_destroy(&mgr->ts_handles); -} - -static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_elements) +static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args) { struct hl_ts_buff *ts_buff = NULL; - u32 size; + u32 size, num_elements; void *p; + num_elements = *(u32 *)args; + ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL); if (!ts_buff) - return NULL; + return -ENOMEM; /* Allocate the user buffer */ size = num_elements * sizeof(u64); @@ -2242,7 +2125,7 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme goto free_mem; ts_buff->user_buff_address = p; - ts_buff->user_buff_size = size; + buf->mappable_size = size; /* Allocate the internal kernel buffer */ size = num_elements * sizeof(struct hl_user_pending_interrupt); @@ -2253,15 +2136,25 @@ static struct hl_ts_buff *hl_ts_alloc_buff(struct hl_device *hdev, u32 num_eleme ts_buff->kernel_buff_address = p; ts_buff->kernel_buff_size = size; - return ts_buff; + buf->private = ts_buff; + + return 0; free_user_buff: vfree(ts_buff->user_buff_address); free_mem: kfree(ts_buff); - return NULL; + return -ENOMEM; } +static struct hl_mmap_mem_buf_behavior hl_ts_behavior = { + .topic = "TS", + .mem_id = HL_MMAP_TYPE_TS_BUFF, + .mmap = hl_ts_mmap, + .alloc = hl_ts_alloc_buf, + .release = ts_buff_release, +}; + /** * allocate_timestamps_buffers() - allocate timestamps buffers * This function will allocate ts buffer that will later on be mapped to the user @@ -2278,54 +2171,22 @@ free_mem: */ static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle) { - struct hl_ts_mgr *ts_mgr = &hpriv->ts_mem_mgr; - struct hl_device *hdev = hpriv->hdev; - struct hl_ts_buff *ts_buff; - int rc = 0; + struct hl_mem_mgr *mmg = &hpriv->mem_mgr; + struct hl_mmap_mem_buf *buf; if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) { - dev_err(hdev->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n", + dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n", args->num_of_elements, TS_MAX_ELEMENTS_NUM); return -EINVAL; } - /* Allocate ts buffer object - * This object will contain two buffers one that will be mapped to the user - * and another internal buffer for the driver use only, which won't be mapped 
- * to the user. - */ - ts_buff = hl_ts_alloc_buff(hdev, args->num_of_elements); - if (!ts_buff) { - rc = -ENOMEM; - goto out_err; - } - - spin_lock(&ts_mgr->ts_lock); - rc = idr_alloc(&ts_mgr->ts_handles, ts_buff, 1, 0, GFP_ATOMIC); - spin_unlock(&ts_mgr->ts_lock); - if (rc < 0) { - dev_err(hdev->dev, "Failed to allocate IDR for a new ts buffer\n"); - goto release_ts_buff; - } - - ts_buff->id = rc; - ts_buff->hdev = hdev; - - kref_init(&ts_buff->refcount); - - /* idr is 32-bit so we can safely OR it with a mask that is above 32 bit */ - *handle = (u64) ts_buff->id | HL_MMAP_TYPE_TS_BUFF; - *handle <<= PAGE_SHIFT; + buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements); + if (!buf) + return -ENOMEM; - dev_dbg(hdev->dev, "Created ts buff object handle(%u)\n", ts_buff->id); + *handle = buf->handle; return 0; - -release_ts_buff: - kref_put(&ts_buff->refcount, ts_buff_release); -out_err: - *handle = 0; - return rc; } int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) @@ -2587,9 +2448,7 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) hl_debugfs_remove_userptr(hdev, userptr); if (userptr->dma_mapped) - hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl, - userptr->sgt->nents, - userptr->dir); + hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir); unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true); kvfree(userptr->pages); diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c new file mode 100644 index 000000000000..ea5f2bd31b0a --- /dev/null +++ b/drivers/misc/habanalabs/common/memory_mgr.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2022 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +/** + * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to + * the buffer descriptor. + * + * @mmg: parent unifed memory manager + * @handle: requested buffer handle + * + * Find the buffer in the store and return a pointer to its descriptor. + * Increase buffer refcount. If not found - return NULL. + */ +struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle) +{ + struct hl_mmap_mem_buf *buf; + + spin_lock(&mmg->lock); + buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT)); + if (!buf) { + spin_unlock(&mmg->lock); + dev_warn(mmg->dev, + "Buff get failed, no match to handle %#llx\n", handle); + return NULL; + } + kref_get(&buf->refcount); + spin_unlock(&mmg->lock); + return buf; +} + +/** + * hl_mmap_mem_buf_destroy - destroy the unused buffer + * + * @buf: memory manager buffer descriptor + * + * Internal function, used as a final step of buffer release. Shall be invoked + * only when the buffer is no longer in use (removed from idr). Will call the + * release callback (if applicable), and free the memory. + */ +static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf) +{ + if (buf->behavior->release) + buf->behavior->release(buf); + + kfree(buf); +} + +/** + * hl_mmap_mem_buf_release - release buffer + * + * @kref: kref that reached 0. + * + * Internal function, used as a kref release callback, when the last user of + * the buffer is released. Shall be called from an interrupt context. 
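+ * It removes the handle from the idr under the mmg lock and then frees the
+ * buffer via hl_mmap_mem_buf_destroy().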
+ */ +static void hl_mmap_mem_buf_release(struct kref *kref) +{ + struct hl_mmap_mem_buf *buf = + container_of(kref, struct hl_mmap_mem_buf, refcount); + + spin_lock(&buf->mmg->lock); + idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT)); + spin_unlock(&buf->mmg->lock); + + hl_mmap_mem_buf_destroy(buf); +} + +/** + * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr + * + * @kref: kref that reached 0. + * + * Internal function, used for kref put by handle. Assumes mmg lock is taken. + * Will remove the buffer from idr, without destroying it. + */ +static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref) +{ + struct hl_mmap_mem_buf *buf = + container_of(kref, struct hl_mmap_mem_buf, refcount); + + idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT)); +} + +/** + * hl_mmap_mem_buf_put - decrease the reference to the buffer + * + * @buf: memory manager buffer descriptor + * + * Decrease the reference to the buffer, and release it if it was the last one. + * Shall be called from an interrupt context. + */ +int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf) +{ + return kref_put(&buf->refcount, hl_mmap_mem_buf_release); +} + +/** + * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the + * given handle. + * + * @mmg: parent unifed memory manager + * @handle: requested buffer handle + * + * Decrease the reference to the buffer, and release it if it was the last one. + * Shall not be called from an interrupt context. Return -EINVAL if handle was + * not found, else return the put outcome (0 or 1). + */ +int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle) +{ + struct hl_mmap_mem_buf *buf; + + spin_lock(&mmg->lock); + buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT)); + if (!buf) { + spin_unlock(&mmg->lock); + dev_dbg(mmg->dev, + "Buff put failed, no match to handle %#llx\n", handle); + return -EINVAL; + } + + if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) { + spin_unlock(&mmg->lock); + hl_mmap_mem_buf_destroy(buf); + return 1; + } + + spin_unlock(&mmg->lock); + return 0; +} + +/** + * @hl_mmap_mem_buf_alloc - allocate a new mappable buffer + * + * @mmg: parent unifed memory manager + * @behavior: behavior object describing this buffer polymorphic behavior + * @gfp: gfp flags to use for the memory allocations + * @args: additional args passed to behavior->alloc + * + * Allocate and register a new memory buffer inside the give memory manager. + * Return the pointer to the new buffer on success or NULL on failure. 
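+ *
+ * A condensed caller sketch (hypothetical names; it mirrors the TS-buffer
+ * flow added later in this patch):
+ *
+ *	buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL,
+ *				    &num_elements);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	*handle = buf->handle;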
+ */ +struct hl_mmap_mem_buf * +hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg, + struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp, + void *args) +{ + struct hl_mmap_mem_buf *buf; + int rc; + + buf = kzalloc(sizeof(*buf), gfp); + if (!buf) + return NULL; + + spin_lock(&mmg->lock); + rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC); + spin_unlock(&mmg->lock); + if (rc < 0) { + dev_err(mmg->dev, + "%s: Failed to allocate IDR for a new buffer, rc=%d\n", + behavior->topic, rc); + goto free_buf; + } + + buf->mmg = mmg; + buf->behavior = behavior; + buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT); + kref_init(&buf->refcount); + + rc = buf->behavior->alloc(buf, gfp, args); + if (rc) { + dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n", + behavior->topic, rc); + goto remove_idr; + } + + return buf; + +remove_idr: + spin_lock(&mmg->lock); + idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT)); + spin_unlock(&mmg->lock); +free_buf: + kfree(buf); + return NULL; +} + +/** + * hl_mmap_mem_buf_vm_close - handle mmap close + * + * @vma: the vma object for which mmap was closed. + * + * Put the memory buffer if it is no longer mapped. + */ +static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma) +{ + struct hl_mmap_mem_buf *buf = + (struct hl_mmap_mem_buf *)vma->vm_private_data; + long new_mmap_size; + + new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start); + + if (new_mmap_size > 0) { + buf->real_mapped_size = new_mmap_size; + return; + } + + atomic_set(&buf->mmap, 0); + hl_mmap_mem_buf_put(buf); + vma->vm_private_data = NULL; +} + +static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = { + .close = hl_mmap_mem_buf_vm_close +}; + +/** + * hl_mem_mgr_mmap - map the given buffer to the user + * + * @mmg: unifed memory manager + * @vma: the vma object for which mmap was closed. + * @args: additional args passed to behavior->mmap + * + * Map the buffer specified by the vma->vm_pgoff to the given vma. + */ +int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma, + void *args) +{ + struct hl_mmap_mem_buf *buf; + u64 user_mem_size; + u64 handle; + int rc; + + /* We use the page offset to hold the idr and thus we need to clear + * it before doing the mmap itself + */ + handle = vma->vm_pgoff << PAGE_SHIFT; + vma->vm_pgoff = 0; + + /* Reference was taken here */ + buf = hl_mmap_mem_buf_get(mmg, handle); + if (!buf) { + dev_err(mmg->dev, + "Memory mmap failed, no match to handle %#llx\n", handle); + return -EINVAL; + } + + /* Validation check */ + user_mem_size = vma->vm_end - vma->vm_start; + if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) { + dev_err(mmg->dev, + "%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n", + buf->behavior->topic, user_mem_size, buf->mappable_size); + rc = -EINVAL; + goto put_mem; + } + +#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK + if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start, + user_mem_size)) { +#else + if (!access_ok((void __user *)(uintptr_t)vma->vm_start, + user_mem_size)) { +#endif + dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n", + buf->behavior->topic, vma->vm_start); + + rc = -EINVAL; + goto put_mem; + } + + if (atomic_cmpxchg(&buf->mmap, 0, 1)) { + dev_err(mmg->dev, + "%s, Memory mmap failed, already mmaped to user\n", + buf->behavior->topic); + rc = -EINVAL; + goto put_mem; + } + + vma->vm_ops = &hl_mmap_mem_buf_vm_ops; + + /* Note: We're transferring the memory reference to vma->vm_private_data here. 
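+	 * The reference taken by hl_mmap_mem_buf_get() above is dropped in
+	 * hl_mmap_mem_buf_vm_close() once the whole range has been unmapped.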
*/ + + vma->vm_private_data = buf; + + rc = buf->behavior->mmap(buf, vma, args); + if (rc) { + atomic_set(&buf->mmap, 0); + goto put_mem; + } + + buf->real_mapped_size = buf->mappable_size; + vma->vm_pgoff = handle >> PAGE_SHIFT; + + return 0; + +put_mem: + hl_mmap_mem_buf_put(buf); + return rc; +} + +/** + * hl_mem_mgr_init - initialize unified memory manager + * + * @dev: owner device pointer + * @mmg: structure to initialize + * + * Initialize an instance of unified memory manager + */ +void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg) +{ + mmg->dev = dev; + spin_lock_init(&mmg->lock); + idr_init(&mmg->handles); +} + +/** + * hl_mem_mgr_fini - release unified memory manager + * + * @mmg: parent unifed memory manager + * + * Release the unified memory manager. Shall be called from an interrupt context. + */ +void hl_mem_mgr_fini(struct hl_mem_mgr *mmg) +{ + struct hl_mmap_mem_buf *buf; + struct idr *idp; + const char *topic; + u32 id; + + idp = &mmg->handles; + + idr_for_each_entry(idp, buf, id) { + topic = buf->behavior->topic; + if (hl_mmap_mem_buf_put(buf) != 1) + dev_err(mmg->dev, + "%s: Buff handle %u for CTX is still alive\n", + topic, id); + } + + /* TODO: can it happen that some buffer is still in use at this point? */ + + idr_destroy(&mmg->handles); +} diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index 810b73421ce1..f3734718d94f 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -9,6 +9,20 @@ #include "../habanalabs.h" +/** + * hl_mmu_get_funcs() - get MMU functions structure + * @hdev: habanalabs device structure. + * @pgt_residency: page table residency. + * @is_dram_addr: true if we need HMMU functions + * + * @return appropriate MMU functions structure + */ +static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency, + bool is_dram_addr) +{ + return &hdev->mmu_func[pgt_residency]; +} + bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -122,6 +136,53 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) } /* + * hl_mmu_get_real_page_size - get real page size to use in map/unmap operation + * + * @hdev: pointer to device data. + * @mmu_prop: MMU properties. + * @page_size: page size + * @real_page_size: set here the actual page size to use for the operation + * @is_dram_addr: true if DRAM address, otherwise false. + * + * @return 0 on success, otherwise non 0 error code + * + * note that this is general implementation that can fit most MMU arch. but as this is used as an + * MMU function: + * 1. it shall not be called directly- only from mmu_func structure instance + * 2. each MMU may modify the implementation internally + */ +int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop, + u32 page_size, u32 *real_page_size, bool is_dram_addr) +{ + /* + * The H/W handles mapping of specific page sizes. Hence if the page + * size is bigger, we break it to sub-pages and map them separately. 
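+ * For example, a 64KB mapping with a 4KB mmu_prop->page_size yields
+ * *real_page_size = 4KB, and the caller then maps 64KB / 4KB = 16
+ * consecutive sub-pages.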
+ */ + if ((page_size % mmu_prop->page_size) == 0) { + *real_page_size = mmu_prop->page_size; + return 0; + } + + dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n", + page_size, mmu_prop->page_size >> 10); + + return -EFAULT; +} + +static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size, + bool is_dram_addr) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + + if (is_dram_addr) + return &prop->dmmu; + else if ((page_size % prop->pmmu_huge.page_size) == 0) + return &prop->pmmu_huge; + + return &prop->pmmu; +} + +/* * hl_mmu_unmap_page - unmaps a virtual addr * * @ctx: pointer to the context structure @@ -142,60 +203,35 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) * For optimization reasons PCI flush may be requested once after unmapping of * large area. */ -int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, - bool flush_pte) +int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte) { struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_mmu_properties *mmu_prop; - u64 real_virt_addr; + struct hl_mmu_funcs *mmu_funcs; + int i, pgt_residency, rc = 0; u32 real_page_size, npages; - int i, rc = 0, pgt_residency; + u64 real_virt_addr; bool is_dram_addr; if (!hdev->mmu_enable) return 0; is_dram_addr = hl_is_dram_va(hdev, virt_addr); - - if (is_dram_addr) - mmu_prop = &prop->dmmu; - else if ((page_size % prop->pmmu_huge.page_size) == 0) - mmu_prop = &prop->pmmu_huge; - else - mmu_prop = &prop->pmmu; + mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr); pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; - /* - * The H/W handles mapping of specific page sizes. Hence if the page - * size is bigger, we break it to sub-pages and unmap them separately. - */ - if ((page_size % mmu_prop->page_size) == 0) { - real_page_size = mmu_prop->page_size; - } else { - /* - * MMU page size may differ from DRAM page size. - * In such case work with the DRAM page size and let the MMU - * scrambling routine to handle this mismatch when - * calculating the address to remove from the MMU page table - */ - if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) { - real_page_size = prop->dram_page_size; - } else { - dev_err(hdev->dev, - "page size of %u is not %uKB aligned, can't unmap\n", - page_size, mmu_prop->page_size >> 10); + mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr); - return -EFAULT; - } - } + rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size, + is_dram_addr); + if (rc) + return rc; npages = page_size / real_page_size; real_virt_addr = virt_addr; for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func[pgt_residency].unmap(ctx, - real_virt_addr, is_dram_addr); + rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr); if (rc) break; @@ -203,7 +239,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, } if (flush_pte) - hdev->mmu_func[pgt_residency].flush(ctx); + mmu_funcs->flush(ctx); return rc; } @@ -230,15 +266,15 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, * For optimization reasons PCI flush may be requested once after mapping of * large area. 
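 * (a caller mapping a large range would typically pass flush_pte = true
 * only for the final page)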
*/ -int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, - u32 page_size, bool flush_pte) +int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, + bool flush_pte) { + int i, rc, pgt_residency, mapped_cnt = 0; struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_mmu_properties *mmu_prop; u64 real_virt_addr, real_phys_addr; + struct hl_mmu_funcs *mmu_funcs; u32 real_page_size, npages; - int i, rc, pgt_residency, mapped_cnt = 0; bool is_dram_addr; @@ -246,40 +282,15 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, return 0; is_dram_addr = hl_is_dram_va(hdev, virt_addr); - - if (is_dram_addr) - mmu_prop = &prop->dmmu; - else if ((page_size % prop->pmmu_huge.page_size) == 0) - mmu_prop = &prop->pmmu_huge; - else - mmu_prop = &prop->pmmu; + mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr); pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; + mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr); - /* - * The H/W handles mapping of specific page sizes. Hence if the page - * size is bigger, we break it to sub-pages and map them separately. - */ - if ((page_size % mmu_prop->page_size) == 0) { - real_page_size = mmu_prop->page_size; - } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) && - (prop->dram_page_size < mmu_prop->page_size)) { - /* - * MMU page size may differ from DRAM page size. - * In such case work with the DRAM page size and let the MMU - * scrambling routine handle this mismatch when calculating - * the address to place in the MMU page table. (in that case - * also make sure that the dram_page_size smaller than the - * mmu page size) - */ - real_page_size = prop->dram_page_size; - } else { - dev_err(hdev->dev, - "page size of %u is not %uKB aligned, can't map\n", - page_size, mmu_prop->page_size >> 10); - - return -EFAULT; - } + rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size, + is_dram_addr); + if (rc) + return rc; /* * Verify that the phys and virt addresses are aligned with the @@ -302,9 +313,8 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, real_phys_addr = phys_addr; for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func[pgt_residency].map(ctx, - real_virt_addr, real_phys_addr, - real_page_size, is_dram_addr); + rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size, + is_dram_addr); if (rc) goto err; @@ -314,22 +324,21 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, } if (flush_pte) - hdev->mmu_func[pgt_residency].flush(ctx); + mmu_funcs->flush(ctx); return 0; err: real_virt_addr = virt_addr; for (i = 0 ; i < mapped_cnt ; i++) { - if (hdev->mmu_func[pgt_residency].unmap(ctx, - real_virt_addr, is_dram_addr)) + if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr)) dev_warn_ratelimited(hdev->dev, "failed to unmap va: 0x%llx\n", real_virt_addr); real_virt_addr += real_page_size; } - hdev->mmu_func[pgt_residency].flush(ctx); + mmu_funcs->flush(ctx); return rc; } @@ -480,11 +489,9 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops, u64 *phys_addr) { - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; + struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr; - u32 hop0_shift_off; - void *p; + struct hl_mmu_properties *mmu_prop; /* last hop holds the 
phys address and flags */ if (hops->unscrambled_paddr) @@ -493,11 +500,11 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val; if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE) - p = &prop->pmmu_huge; + mmu_prop = &prop->pmmu_huge; else if (hops->range_type == HL_VA_RANGE_TYPE_HOST) - p = &prop->pmmu; + mmu_prop = &prop->pmmu; else /* HL_VA_RANGE_TYPE_DRAM */ - p = &prop->dmmu; + mmu_prop = &prop->dmmu; if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) && !is_power_of_2(prop->dram_page_size)) { @@ -508,7 +515,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, /* * Bit arithmetics cannot be used for non power of two page * sizes. In addition, since bit arithmetics is not used, - * we cannot ignore dram base. All that shall be considerd. + * we cannot ignore dram base. All that shall be considered. */ dram_page_size = prop->dram_page_size; @@ -526,10 +533,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, * structure in order to determine the right masks * for the page offset. */ - hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift); - p = (char *)p + hop0_shift_off; - p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); - hop_shift = *(u64 *)p; + hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1]; offset_mask = (1ull << hop_shift) - 1; addr_mask = ~(offset_mask); *phys_addr = (tmp_phys_addr & addr_mask) | @@ -557,40 +561,39 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops) { struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; + struct asic_fixed_properties *prop; struct hl_mmu_properties *mmu_prop; - int rc; + struct hl_mmu_funcs *mmu_funcs; + int pgt_residency, rc; bool is_dram_addr; if (!hdev->mmu_enable) return -EOPNOTSUPP; + prop = &hdev->asic_prop; hops->scrambled_vaddr = virt_addr; /* assume no scrambling */ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); + prop->dmmu.start_addr, + prop->dmmu.end_addr); - /* host-residency is the same in PMMU and HPMMU, use one of them */ + /* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; + pgt_residency = mmu_prop->host_resident ? 
MMU_HR_PGT : MMU_DR_PGT; + mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr); mutex_lock(&ctx->mmu_lock); - - if (mmu_prop->host_resident) - rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx, - virt_addr, hops); - else - rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx, - virt_addr, hops); - + rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops); mutex_unlock(&ctx->mmu_lock); + if (rc) + return rc; + /* add page offset to physical address */ if (hops->unscrambled_paddr) - hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, - &hops->unscrambled_paddr); + hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr); - return rc; + return 0; } int hl_mmu_if_set_funcs(struct hl_device *hdev) @@ -662,6 +665,55 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard, return rc; } +static void hl_mmu_prefetch_work_function(struct work_struct *work) +{ + struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work); + struct hl_ctx *ctx = pfw->ctx; + + if (!hl_device_operational(ctx->hdev, NULL)) + goto put_ctx; + + mutex_lock(&ctx->mmu_lock); + + ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, + pfw->va, pfw->size); + + mutex_unlock(&ctx->mmu_lock); + +put_ctx: + /* + * context was taken in the common mmu prefetch function- see comment there about + * context handling. + */ + hl_ctx_put(ctx); + kfree(pfw); +} + +int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size) +{ + struct hl_prefetch_work *handle_pf_work; + + handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL); + if (!handle_pf_work) + return -ENOMEM; + + INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function); + handle_pf_work->ctx = ctx; + handle_pf_work->va = va; + handle_pf_work->size = size; + handle_pf_work->flags = flags; + handle_pf_work->asid = asid; + + /* + * as actual prefetch is done in a WQ we must get the context (and put it + * at the end of the work function) + */ + hl_ctx_get(ctx); + queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work); + + return 0; +} + u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte) { return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX; @@ -670,6 +722,7 @@ u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte) /** * hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP * @ctx: pointer to the context structure to initialize. + * @mmu_prop: MMU properties. * @hop_idx: HOP index. * @hop_addr: HOP address. * @virt_addr: virtual address fro the translation. 
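The hunk below folds the per-hop switch statement into two array lookups on mmu_prop->hop_shifts[] and mmu_prop->hop_masks[]. A self-contained sketch of that table-driven pattern, using illustrative 9-bit-per-hop values rather than the driver's real per-ASIC MMU properties:

	#include <stdint.h>

	#define HOP_BITS	9	/* illustrative: 512 PTEs per hop table */
	#define PTE_SIZE	8	/* illustrative: one 64-bit PTE */
	#define MAX_HOPS	5

	/* Illustrative per-hop shifts; the driver instead fills
	 * mmu_prop->hop_shifts[] and mmu_prop->hop_masks[] from
	 * ASIC-specific constants (e.g. MMU_V1_1_HOP0_SHIFT below).
	 */
	static const unsigned int hop_shifts[MAX_HOPS] = { 48, 39, 30, 21, 12 };

	static uint64_t hop_mask(int hop_idx)
	{
		/* select the virtual-address bits this hop translates */
		return ((1ULL << HOP_BITS) - 1) << hop_shifts[hop_idx];
	}

	/* Same arithmetic as hl_mmu_get_hop_pte_phys_addr(): index the hop
	 * table at hop_addr by the masked, shifted virtual-address bits.
	 */
	static uint64_t hop_pte_addr(uint64_t hop_addr, uint64_t virt_addr,
				     int hop_idx)
	{
		return hop_addr + PTE_SIZE *
		       ((virt_addr & hop_mask(hop_idx)) >> hop_shifts[hop_idx]);
	}

The refactored hl_mmu_get_hop_pte_phys_addr() below, and get_hop_pte_addr() in mmu_v1.c further down, perform this same computation against the per-ASIC tables, which is what lets the six hand-written switch cases collapse into one expression.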
@@ -686,33 +739,8 @@ u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *m return U64_MAX; } - /* currently max number of HOPs is 6 */ - switch (hop_idx) { - case 0: - mask = mmu_prop->hop0_mask; - shift = mmu_prop->hop0_shift; - break; - case 1: - mask = mmu_prop->hop1_mask; - shift = mmu_prop->hop1_shift; - break; - case 2: - mask = mmu_prop->hop2_mask; - shift = mmu_prop->hop2_shift; - break; - case 3: - mask = mmu_prop->hop3_mask; - shift = mmu_prop->hop3_shift; - break; - case 4: - mask = mmu_prop->hop4_mask; - shift = mmu_prop->hop4_shift; - break; - default: - mask = mmu_prop->hop5_mask; - shift = mmu_prop->hop5_shift; - break; - } + shift = mmu_prop->hop_shifts[hop_idx]; + mask = mmu_prop->hop_masks[hop_idx]; return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift); } diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c index d03786d0c407..e2d91a69acc2 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c @@ -10,6 +10,8 @@ #include <linux/slab.h> +#define MMU_V1_MAX_HOPS (MMU_HOP4 + 1) + static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr); static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr) @@ -170,51 +172,15 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr) return num_of_ptes_left; } -static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, - u64 virt_addr, u64 mask, u64 shift) -{ - return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * - ((virt_addr & mask) >> shift); -} - -static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask, - mmu_prop->hop0_shift); -} - -static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask, - mmu_prop->hop1_shift); -} - -static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) +static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop, + u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx) { - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask, - mmu_prop->hop2_shift); -} + u64 mask, shift; -static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask, - mmu_prop->hop3_shift); -} - -static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask, - mmu_prop->hop4_shift); + mask = mmu_prop->hop_masks[hop_idx]; + shift = mmu_prop->hop_shifts[hop_idx]; + return hop_addr_arr[hop_idx] + + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift); } static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, @@ -516,74 +482,50 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx) } } -static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, +static int hl_mmu_v1_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr) { + u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0; struct hl_device *hdev = ctx->hdev; struct asic_fixed_properties *prop = 
&hdev->asic_prop; struct hl_mmu_properties *mmu_prop; - u64 hop0_addr = 0, hop0_pte_addr = 0, - hop1_addr = 0, hop1_pte_addr = 0, - hop2_addr = 0, hop2_pte_addr = 0, - hop3_addr = 0, hop3_pte_addr = 0, - hop4_addr = 0, hop4_pte_addr = 0, - curr_pte; bool is_huge, clear_hop3 = true; + int hop_idx; /* shifts and masks are the same in PMMU and HPMMU, use one of them */ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; - hop0_addr = get_hop0_addr(ctx); - hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; - - hop1_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte); - - if (hop1_addr == ULLONG_MAX) - goto not_mapped; - - hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; - - hop2_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte); - - if (hop2_addr == ULLONG_MAX) - goto not_mapped; - - hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; - - hop3_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte); - - if (hop3_addr == ULLONG_MAX) - goto not_mapped; + for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) { + if (hop_idx == MMU_HOP0) { + hop_addr[hop_idx] = get_hop0_addr(ctx); + } else { + hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte); + if (hop_addr[hop_idx] == ULLONG_MAX) + goto not_mapped; + } - hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); + hop_pte_addr[hop_idx] = + get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx); - curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; + curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx]; + } is_huge = curr_pte & mmu_prop->last_mask; if (is_dram_addr && !is_huge) { - dev_err(hdev->dev, - "DRAM unmapping should use huge pages only\n"); + dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n"); return -EFAULT; } if (!is_huge) { - hop4_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte); - - if (hop4_addr == ULLONG_MAX) + hop_idx = MMU_HOP4; + hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte); + if (hop_addr[hop_idx] == ULLONG_MAX) goto not_mapped; - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, - virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; - + hop_pte_addr[hop_idx] = + get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx); + curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx]; clear_hop3 = false; } @@ -605,39 +547,33 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, goto not_mapped; } - write_final_pte(ctx, hop3_pte_addr, default_pte); - put_pte(ctx, hop3_addr); + hop_idx = MMU_HOP3; + write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte); + put_pte(ctx, hop_addr[hop_idx]); } else { if (!(curr_pte & PAGE_PRESENT_MASK)) goto not_mapped; - if (hop4_addr) - clear_pte(ctx, hop4_pte_addr); + if (hop_addr[MMU_HOP4]) + clear_pte(ctx, hop_pte_addr[MMU_HOP4]); else - clear_pte(ctx, hop3_pte_addr); + clear_pte(ctx, hop_pte_addr[MMU_HOP3]); - if (hop4_addr && !put_pte(ctx, hop4_addr)) + if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4])) clear_hop3 = true; if (!clear_hop3) goto mapped; - clear_pte(ctx, hop3_pte_addr); + for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) { + clear_pte(ctx, hop_pte_addr[hop_idx]); - if (put_pte(ctx, hop3_addr)) - goto mapped; + if (hop_idx == MMU_HOP0) + break; - clear_pte(ctx, hop2_pte_addr); - - if (put_pte(ctx, hop2_addr)) - goto mapped; - - clear_pte(ctx, hop1_pte_addr); - - if (put_pte(ctx, hop1_addr)) - goto 
mapped; - - clear_pte(ctx, hop0_pte_addr); + if (put_pte(ctx, hop_addr[hop_idx])) + goto mapped; + } } mapped: @@ -650,21 +586,15 @@ not_mapped: return -EINVAL; } -static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, +static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, bool is_dram_addr) { + u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0; struct hl_device *hdev = ctx->hdev; struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_mmu_properties *mmu_prop; - u64 hop0_addr = 0, hop0_pte_addr = 0, - hop1_addr = 0, hop1_pte_addr = 0, - hop2_addr = 0, hop2_pte_addr = 0, - hop3_addr = 0, hop3_pte_addr = 0, - hop4_addr = 0, hop4_pte_addr = 0, - curr_pte = 0; - bool hop1_new = false, hop2_new = false, hop3_new = false, - hop4_new = false, is_huge; - int rc = -ENOMEM; + bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false}; + int num_hops, hop_idx, prev_hop, rc = -ENOMEM; /* * This mapping function can map a page or a huge page. For huge page @@ -684,39 +614,21 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, is_huge = false; } - hop0_addr = get_hop0_addr(ctx); - hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; - - hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new); - if (hop1_addr == ULLONG_MAX) - goto err; - - hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; - - hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new); - if (hop2_addr == ULLONG_MAX) - goto err; - - hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; + num_hops = is_huge ? 
(MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS; - hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new); - if (hop3_addr == ULLONG_MAX) - goto err; - - hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; - - if (!is_huge) { - hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new); - if (hop4_addr == ULLONG_MAX) - goto err; + for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) { + if (hop_idx == MMU_HOP0) { + hop_addr[hop_idx] = get_hop0_addr(ctx); + } else { + hop_addr[hop_idx] = + get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]); + if (hop_addr[hop_idx] == ULLONG_MAX) + goto err; + } - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, - virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; + hop_pte_addr[hop_idx] = + get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx); + curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx]; } if (hdev->dram_default_page_mapping && is_dram_addr) { @@ -732,30 +644,22 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, goto err; } - if (hop1_new || hop2_new || hop3_new || hop4_new) { - dev_err(hdev->dev, - "DRAM mapping should not allocate more hops\n"); - rc = -EFAULT; - goto err; + for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) { + if (hop_new[hop_idx]) { + dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n"); + rc = -EFAULT; + goto err; + } } } else if (curr_pte & PAGE_PRESENT_MASK) { dev_err(hdev->dev, "mapping already exists for virt_addr 0x%llx\n", virt_addr); - dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr); - dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr); - dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr); - dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr); - - if (!is_huge) - dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop4_pte_addr, - hop4_pte_addr); + for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) + dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx, + *(u64 *) (uintptr_t) hop_pte_addr[hop_idx], + hop_pte_addr[hop_idx]); rc = -EINVAL; goto err; @@ -764,53 +668,28 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask | PAGE_PRESENT_MASK; - if (is_huge) - write_final_pte(ctx, hop3_pte_addr, curr_pte); - else - write_final_pte(ctx, hop4_pte_addr, curr_pte); + write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte); - if (hop1_new) { - curr_pte = - (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop0_pte_addr, curr_pte); - } - if (hop2_new) { - curr_pte = - (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop1_pte_addr, curr_pte); - get_pte(ctx, hop1_addr); - } - if (hop3_new) { - curr_pte = - (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop2_pte_addr, curr_pte); - get_pte(ctx, hop2_addr); - } + for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) { + prev_hop = hop_idx - 1; - if (!is_huge) { - if (hop4_new) { - curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) | - PAGE_PRESENT_MASK; - write_pte(ctx, hop3_pte_addr, curr_pte); - get_pte(ctx, hop3_addr); + if (hop_new[hop_idx]) { + curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + 
write_pte(ctx, hop_pte_addr[prev_hop], curr_pte); + if (hop_idx != MMU_HOP1) + get_pte(ctx, hop_addr[prev_hop]); } - - get_pte(ctx, hop4_addr); - } else { - get_pte(ctx, hop3_addr); } + get_pte(ctx, hop_addr[num_hops - 1]); + return 0; err: - if (hop4_new) - free_hop(ctx, hop4_addr); - if (hop3_new) - free_hop(ctx, hop3_addr); - if (hop2_new) - free_hop(ctx, hop2_addr); - if (hop1_new) - free_hop(ctx, hop1_addr); + for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) { + if (hop_new[hop_idx]) + free_hop(ctx, hop_addr[hop_idx]); + } return rc; } @@ -928,8 +807,8 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu) mmu->fini = hl_mmu_v1_fini; mmu->ctx_init = hl_mmu_v1_ctx_init; mmu->ctx_fini = hl_mmu_v1_ctx_fini; - mmu->map = _hl_mmu_v1_map; - mmu->unmap = _hl_mmu_v1_unmap; + mmu->map = hl_mmu_v1_map; + mmu->unmap = hl_mmu_v1_unmap; mmu->flush = flush; mmu->swap_out = hl_mmu_v1_swap_out; mmu->swap_in = hl_mmu_v1_swap_in; diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c index bb9ce22bafc4..610acd4a8057 100644 --- a/drivers/misc/habanalabs/common/pci/pci.c +++ b/drivers/misc/habanalabs/common/pci/pci.c @@ -392,6 +392,7 @@ enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr) */ int hl_pci_init(struct hl_device *hdev) { + struct asic_fixed_properties *prop = &hdev->asic_prop; struct pci_dev *pdev = hdev->pdev; int rc; @@ -419,17 +420,14 @@ int hl_pci_init(struct hl_device *hdev) } /* Driver must sleep in order for FW to finish the iATU configuration */ - if (hdev->asic_prop.iatu_done_by_fw) { + if (hdev->asic_prop.iatu_done_by_fw) usleep_range(2000, 3000); - hdev->asic_funcs->set_dma_mask_from_fw(hdev); - } - rc = dma_set_mask_and_coherent(&pdev->dev, - DMA_BIT_MASK(hdev->dma_mask)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask)); if (rc) { dev_err(hdev->dev, "Failed to set dma mask to %d bits, error %d\n", - hdev->dma_mask, rc); + prop->dma_mask, rc); goto unmap_pci_bars; } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 21c2b678ff72..fba322241096 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -95,7 +95,7 @@ #define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3 -#define GAUDI_ARB_WDT_TIMEOUT 0x1000000 +#define GAUDI_ARB_WDT_TIMEOUT 0xEE6b27FF /* 8 seconds */ #define GAUDI_CLK_GATE_DEBUGFS_MASK (\ BIT(GAUDI_ENGINE_ID_MME_0) |\ @@ -557,6 +557,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev) } prop->device_dma_offset_for_host_access = HOST_PHYS_BASE; + prop->host_base_address = HOST_PHYS_BASE; + prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE; prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; prop->collective_first_sob = 0; prop->collective_first_mon = 0; @@ -595,18 +597,19 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev) prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE; prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; + prop->device_mem_alloc_default_page_size = prop->dram_page_size; prop->dram_supports_virtual_memory = false; - prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT; - prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT; - prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT; - prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT; - prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT; - prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK; - prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK; - prop->pmmu.hop2_mask = 
MMU_V1_1_HOP2_MASK; - prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK; - prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK; + prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT; + prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT; + prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT; + prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT; + prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT; + prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK; + prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK; + prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK; + prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK; + prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK; prop->pmmu.start_addr = VA_HOST_SPACE_START; prop->pmmu.end_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1; @@ -673,6 +676,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev) prop->set_max_power_on_device_init = true; + prop->dma_mask = 48; + return 0; } @@ -754,8 +759,6 @@ static int gaudi_init_iatu(struct hl_device *hdev) if (rc) goto done; - hdev->asic_funcs->set_dma_mask_from_fw(hdev); - /* Outbound Region 0 - Point to Host */ outbound_region.addr = HOST_PHYS_BASE; outbound_region.size = HOST_PHYS_SIZE; @@ -1008,7 +1011,7 @@ free_job: release_cb: hl_cb_put(cb); - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); return rc; } @@ -1470,7 +1473,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev, job->patched_cb = NULL; job->job_cb_size = job->user_cb_size; - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); /* increment refcount as for external queues we get completion */ if (hw_queue_prop->type == QUEUE_TYPE_EXT) @@ -2808,9 +2811,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id, WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset, QM_ARB_ERR_MSG_EN_MASK); - /* Increase ARB WDT to support streams architecture */ - WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, - GAUDI_ARB_WDT_TIMEOUT); + /* Set timeout to maximum */ + WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT); WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset, QMAN_EXTERNAL_MAKE_TRUSTED); @@ -2987,9 +2989,8 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id, WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset, QM_ARB_ERR_MSG_EN_MASK); - /* Increase ARB WDT to support streams architecture */ - WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, - GAUDI_ARB_WDT_TIMEOUT); + /* Set timeout to maximum */ + WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT); WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0); WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset, @@ -3124,9 +3125,8 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset, WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset, QM_ARB_ERR_MSG_EN_MASK); - /* Increase ARB WDT to support streams architecture */ - WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, - GAUDI_ARB_WDT_TIMEOUT); + /* Set timeout to maximum */ + WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, GAUDI_ARB_WDT_TIMEOUT); WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0); WREG32(mmMME0_QM_GLBL_PROT + mme_offset, @@ -3258,9 +3258,8 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset, QM_ARB_ERR_MSG_EN_MASK); - /* Increase ARB WDT to support streams architecture */ - WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, 
- GAUDI_ARB_WDT_TIMEOUT); + /* Set timeout to maximum */ + WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, GAUDI_ARB_WDT_TIMEOUT); WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0); WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset, @@ -3409,9 +3408,8 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset, WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset, QM_ARB_ERR_MSG_EN_MASK); - /* Increase ARB WDT to support streams architecture */ - WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, - GAUDI_ARB_WDT_TIMEOUT); + /* Set timeout to maximum */ + WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, GAUDI_ARB_WDT_TIMEOUT); WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0); WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset, @@ -3792,9 +3790,6 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_ { u32 wait_timeout_ms; - dev_info(hdev->dev, - "Halting compute engines and disabling interrupts\n"); - if (hdev->pldm) wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC; else @@ -4212,7 +4207,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset } if (fw_reset) { - dev_info(hdev->dev, + dev_dbg(hdev->dev, "Firmware performs HARD reset, going to wait %dms\n", reset_timeout_ms); @@ -4304,11 +4299,11 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT); - dev_info(hdev->dev, + dev_dbg(hdev->dev, "Issued HARD reset command, going to wait %dms\n", reset_timeout_ms); } else { - dev_info(hdev->dev, + dev_dbg(hdev->dev, "Firmware performs HARD reset, going to wait %dms\n", reset_timeout_ms); } @@ -4745,12 +4740,11 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size, dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); } -static int gaudi_hbm_scrubbing(struct hl_device *hdev) +static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val) { struct asic_fixed_properties *prop = &hdev->asic_prop; u64 cur_addr = DRAM_BASE_ADDR_USER; - u32 val; - u32 chunk_size; + u32 chunk_size, busy; int rc, dma_id; while (cur_addr < prop->dram_end_address) { @@ -4764,8 +4758,10 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev) "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n", cur_addr, cur_addr + chunk_size); - WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf); - WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf); + WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, + lower_32_bits(val)); + WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, + upper_32_bits(val)); WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, lower_32_bits(cur_addr)); WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, @@ -4788,8 +4784,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev) rc = hl_poll_timeout( hdev, mmDMA0_CORE_STS0 + dma_offset, - val, - ((val & DMA0_CORE_STS0_BUSY_MASK) == 0), + busy, + ((busy & DMA0_CORE_STS0_BUSY_MASK) == 0), 1000, HBM_SCRUBBING_TIMEOUT_US); @@ -4843,7 +4839,7 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) } /* Scrub HBM using all DMA channels in parallel */ - rc = gaudi_hbm_scrubbing(hdev); + rc = gaudi_scrub_device_dram(hdev, 0xdeadbeaf); if (rc) dev_err(hdev->dev, "Failed to clear HBM in mem scrub all\n"); @@ -5038,37 +5034,7 @@ static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev, hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr); } -static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir) -{ - struct 
scatterlist *sg; - int i; - - if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir)) - return -ENOMEM; - - /* Shift to the device's base physical address of host memory */ - for_each_sg(sgl, sg, nents, i) - sg->dma_address += HOST_PHYS_BASE; - - return 0; -} - -static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - /* Cancel the device's base physical address of host memory */ - for_each_sg(sgl, sg, nents, i) - sg->dma_address -= HOST_PHYS_BASE; - - dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir); -} - -static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, - struct sg_table *sgt) +static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt) { struct scatterlist *sg, *sg_next_iter; u32 count, dma_desc_cnt; @@ -5077,8 +5043,7 @@ static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, dma_desc_cnt = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - + for_each_sgtable_dma_sg(sgt, sg, count) { len = sg_dma_len(sg); addr = sg_dma_address(sg); @@ -5132,8 +5097,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev, list_add_tail(&userptr->job_node, parser->job_userptr_list); - rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, - userptr->sgt->nents, dir); + rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir); if (rc) { dev_err(hdev->dev, "failed to map sgt with DMA region\n"); goto unpin_memory; @@ -5408,7 +5372,7 @@ static int gaudi_patch_dma_packet(struct hl_device *hdev, sgt = userptr->sgt; dma_desc_cnt = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { + for_each_sgtable_dma_sg(sgt, sg, count) { len = sg_dma_len(sg); dma_addr = sg_dma_address(sg); @@ -5562,7 +5526,7 @@ static int gaudi_patch_cb(struct hl_device *hdev, static int gaudi_parse_cb_mmu(struct hl_device *hdev, struct hl_cs_parser *parser) { - u64 patched_cb_handle; + u64 handle; u32 patched_cb_size; struct hl_cb *user_cb; int rc; @@ -5578,9 +5542,9 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev, else parser->patched_cb_size = parser->user_cb_size; - rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, parser->patched_cb_size, false, false, - &patched_cb_handle); + &handle); if (rc) { dev_err(hdev->dev, @@ -5589,13 +5553,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev, return rc; } - patched_cb_handle >>= PAGE_SHIFT; - parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, - (u32) patched_cb_handle); + parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); /* hl_cb_get should never fail */ if (!parser->patched_cb) { - dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); rc = -EFAULT; goto out; } @@ -5635,8 +5596,7 @@ out: * cb_put will release it, but here we want to remove it from the * idr */ - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, - patched_cb_handle << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, handle); return rc; } @@ -5644,7 +5604,7 @@ out: static int gaudi_parse_cb_no_mmu(struct hl_device *hdev, struct hl_cs_parser *parser) { - u64 patched_cb_handle; + u64 handle; int rc; rc = gaudi_validate_cb(hdev, parser, false); @@ -5652,22 +5612,19 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev, if (rc) goto free_userptr; - rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + rc = hl_cb_create(hdev, 
&hdev->kernel_mem_mgr, hdev->kernel_ctx, parser->patched_cb_size, false, false, - &patched_cb_handle); + &handle); if (rc) { dev_err(hdev->dev, "Failed to allocate patched CB for DMA CS %d\n", rc); goto free_userptr; } - patched_cb_handle >>= PAGE_SHIFT; - parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, - (u32) patched_cb_handle); + parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); /* hl_cb_get should never fail here */ if (!parser->patched_cb) { - dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); rc = -EFAULT; goto out; } @@ -5684,8 +5641,7 @@ out: * cb_put will release it, but here we want to remove it from the * idr */ - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, - patched_cb_handle << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, handle); free_userptr: if (rc) @@ -5798,7 +5754,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, struct hl_cs_job *job; u32 cb_size, ctl, err_cause; struct hl_cb *cb; - u64 id; int rc; cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false); @@ -5865,9 +5820,8 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, } release_cb: - id = cb->id; hl_cb_put(cb); - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); return rc; } @@ -5930,7 +5884,7 @@ static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base, release_cb: hl_cb_put(cb); - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); return rc; } @@ -6101,184 +6055,6 @@ static void gaudi_restore_phase_topology(struct hl_device *hdev) } -static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, - bool user_address, u32 *val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 hbm_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - - *val = RREG32(addr - CFG_BASE); - - } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { - - *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR)); - - } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { - - u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); - - if (hbm_bar_addr != U64_MAX) { - *val = readl(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr)); - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr); - } - - if (hbm_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - - *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE); - - } else { - rc = -EFAULT; - } - - return rc; -} - -static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, - bool user_address, u32 val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 hbm_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - - WREG32(addr - CFG_BASE, val); - - } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { - - writel(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR)); - - } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { - - u64 bar_base_addr = DRAM_PHYS_BASE + 
(addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); - - if (hbm_bar_addr != U64_MAX) { - writel(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr)); - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr); - } - - if (hbm_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - - *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; - - } else { - rc = -EFAULT; - } - - return rc; -} - -static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, - bool user_address, u64 *val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 hbm_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - - u32 val_l = RREG32(addr - CFG_BASE); - u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE); - - *val = (((u64) val_h) << 32) | val_l; - - } else if ((addr >= SRAM_BASE_ADDR) && - (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { - - *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR)); - - } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { - - u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); - - if (hbm_bar_addr != U64_MAX) { - *val = readq(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr)); - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr); - } - - if (hbm_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - - *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE); - - } else { - rc = -EFAULT; - } - - return rc; -} - -static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, - bool user_address, u64 val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 hbm_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - - WREG32(addr - CFG_BASE, lower_32_bits(val)); - WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val)); - - } else if ((addr >= SRAM_BASE_ADDR) && - (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { - - writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR)); - - } else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { - - u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); - - if (hbm_bar_addr != U64_MAX) { - writeq(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr)); - hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr); - } - - if (hbm_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - - *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; - - } else { - rc = -EFAULT; - } - - return rc; -} - static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr, u32 size_to_dma, dma_addr_t dma_addr) { @@ -7628,19 +7404,18 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type, gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type); /* In case it's the first razwi, save its parameters*/ - rc = 
atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1); + rc = atomic_cmpxchg(&hdev->last_error.razwi.write_disable, 0, 1); if (!rc) { - hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime; - hdev->last_error.razwi_timestamp = ktime_get(); - hdev->last_error.razwi_addr = razwi_addr; - hdev->last_error.razwi_engine_id_1 = engine_id_1; - hdev->last_error.razwi_engine_id_2 = engine_id_2; + hdev->last_error.razwi.timestamp = ktime_get(); + hdev->last_error.razwi.addr = razwi_addr; + hdev->last_error.razwi.engine_id_1 = engine_id_1; + hdev->last_error.razwi.engine_id_2 = engine_id_2; /* * If first engine id holds non valid value the razwi initiator * does not have engine id */ - hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX); - hdev->last_error.razwi_type = razwi_type; + hdev->last_error.razwi.non_engine_initiator = (engine_id_1 == U16_MAX); + hdev->last_error.razwi.type = razwi_type; } } @@ -8103,7 +7878,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_MMU_PAGE_FAULT: case GAUDI_EVENT_MMU_WR_PERM: case GAUDI_EVENT_RAZWI_OR_ADC: - case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM: case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM: case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM: fallthrough; @@ -8123,6 +7897,19 @@ static void gaudi_handle_eqe(struct hl_device *hdev, hl_fw_unmask_irq(hdev, event_type); break; + case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM: + gaudi_print_irq_info(hdev, event_type, true); + gaudi_handle_qman_err(hdev, event_type); + hl_fw_unmask_irq(hdev, event_type); + + /* In TPC QM event, notify on TPC assertion. While there isn't + * a specific event for assertion yet, the FW generates QM event. + * The SW upper layer will inspect an internal mapped area to indicate + * if the event is a tpc assertion or tpc QM. + */ + hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT); + break; + case GAUDI_EVENT_RAZWI_OR_ADC_SW: gaudi_print_irq_info(hdev, event_type, true); goto reset_device; @@ -8328,8 +8115,6 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev) set_default_power_values(hdev); - hdev->max_power = prop->max_power_default; - return 0; } @@ -8501,6 +8286,16 @@ static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data, return hl_fw_get_eeprom_data(hdev, data, max_size); } +static int gaudi_get_monitor_dump(struct hl_device *hdev, void *data) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_get_monitor_dump(hdev, data); +} + /* * this function should be used only during initialization and/or after reset, * when there are no active users. 
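The razwi hunk above folds the loose last_error.razwi_* fields into a razwi sub-structure, but keeps the same lock-free "record only the first error" idiom: atomic_cmpxchg() on the write_disable flag returns the flag's old value, so exactly one reporter observes 0 and gets to record the timestamp, address and engine IDs; concurrent or later faults leave the saved data untouched until the flag is reset. A minimal sketch of the idiom, using hypothetical names (struct first_error, record_first_error()) in place of the driver's hdev->last_error fields:

#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver's last-error bookkeeping. */
struct first_error {
	atomic_t write_disable;	/* 0 until the first error is recorded */
	ktime_t timestamp;
	u64 addr;
};

static void record_first_error(struct first_error *e, u64 addr)
{
	/*
	 * atomic_cmpxchg() returns the old value, so only the first
	 * caller sees 0; everyone else bails out without writing.
	 */
	if (atomic_cmpxchg(&e->write_disable, 0, 1))
		return;

	e->timestamp = ktime_get();
	e->addr = addr;
}

A later reader consumes the snapshot and re-arms the capture by resetting write_disable back to 0, which is why the flag is a "write disable" rather than a simple "valid" bit.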
@@ -9066,11 +8861,6 @@ static void gaudi_reset_sob(struct hl_device *hdev, void *data) kref_init(&hw_sob->kref); } -static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev) -{ - hdev->dma_mask = 48; -} - static u64 gaudi_get_device_time(struct hl_device *hdev) { u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32; @@ -9132,7 +8922,7 @@ static int gaudi_add_sync_to_engine_map_entry( */ if (reg_value == 0 || reg_value == 0xffffffff) return 0; - reg_value -= (u32)CFG_BASE; + reg_value -= lower_32_bits(CFG_BASE); /* create a new hash entry */ entry = kzalloc(sizeof(*entry), GFP_KERNEL); @@ -9377,6 +9167,12 @@ static u32 *gaudi_get_stream_master_qid_arr(void) return gaudi_stream_master; } +static void gaudi_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info) +{ + /* set 0 since multiple pages are not supported */ + info->page_order_bitmask = 0; +} + static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hl_device *hdev = dev_get_drvdata(dev); @@ -9418,24 +9214,21 @@ static const struct hl_asic_funcs gaudi_funcs = { .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent, .asic_dma_free_coherent = gaudi_dma_free_coherent, .scrub_device_mem = gaudi_scrub_device_mem, + .scrub_device_dram = gaudi_scrub_device_dram, .get_int_queue_base = gaudi_get_int_queue_base, .test_queues = gaudi_test_queues, .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc, .asic_dma_pool_free = gaudi_dma_pool_free, .cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc, .cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free, - .hl_dma_unmap_sg = gaudi_dma_unmap_sg, + .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable, .cs_parser = gaudi_cs_parser, - .asic_dma_map_sg = gaudi_dma_map_sg, + .asic_dma_map_sgtable = hl_dma_map_sgtable, .get_dma_desc_list_size = gaudi_get_dma_desc_list_size, .add_end_of_cb_packets = gaudi_add_end_of_cb_packets, .update_eq_ci = gaudi_update_eq_ci, .context_switch = gaudi_context_switch, .restore_phase_topology = gaudi_restore_phase_topology, - .debugfs_read32 = gaudi_debugfs_read32, - .debugfs_write32 = gaudi_debugfs_write32, - .debugfs_read64 = gaudi_debugfs_read64, - .debugfs_write64 = gaudi_debugfs_write64, .debugfs_read_dma = gaudi_debugfs_read_dma, .add_device_attr = gaudi_add_device_attr, .handle_eqe = gaudi_handle_eqe, @@ -9444,6 +9237,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .write_pte = gaudi_write_pte, .mmu_invalidate_cache = gaudi_mmu_invalidate_cache, .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range, + .mmu_prefetch_cache_range = NULL, .send_heartbeat = gaudi_send_heartbeat, .debug_coresight = gaudi_debug_coresight, .is_device_idle = gaudi_is_device_idle, @@ -9452,6 +9246,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .hw_queues_unlock = gaudi_hw_queues_unlock, .get_pci_id = gaudi_get_pci_id, .get_eeprom_data = gaudi_get_eeprom_data, + .get_monitor_dump = gaudi_get_monitor_dump, .send_cpu_message = gaudi_send_cpu_message, .pci_bars_map = gaudi_pci_bars_map, .init_iatu = gaudi_init_iatu, @@ -9469,7 +9264,6 @@ static const struct hl_asic_funcs gaudi_funcs = { .gen_wait_cb = gaudi_gen_wait_cb, .reset_sob = gaudi_reset_sob, .reset_sob_group = gaudi_reset_sob_group, - .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw, .get_device_time = gaudi_get_device_time, .collective_wait_init_cs = gaudi_collective_wait_init_cs, .collective_wait_create_jobs = gaudi_collective_wait_create_jobs, @@ -9486,7 +9280,11 @@ static const struct hl_asic_funcs gaudi_funcs = { 
.get_sob_addr = gaudi_get_sob_addr, .set_pci_memory_regions = gaudi_set_pci_memory_regions, .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr, - .is_valid_dram_page_size = NULL + .is_valid_dram_page_size = NULL, + .mmu_get_real_page_size = hl_mmu_get_real_page_size, + .get_valid_dram_page_orders = gaudi_get_valid_dram_page_orders, + .access_dev_mem = hl_access_dev_mem, + .set_dram_bar_base = gaudi_set_hbm_bar_base, }; /** diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 54de7c599072..4fbcf3f0afe5 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -148,14 +148,14 @@ #define MME_QMAN_LENGTH 1024 #define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) -#define HBM_DMA_QMAN_LENGTH 1024 +#define HBM_DMA_QMAN_LENGTH 4096 #define HBM_DMA_QMAN_SIZE_IN_BYTES \ (HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) #define TPC_QMAN_LENGTH 1024 #define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) -#define NIC_QMAN_LENGTH 1024 +#define NIC_QMAN_LENGTH 4096 #define NIC_QMAN_SIZE_IN_BYTES (NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ec9358bcbf0b..4cde505a7416 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -390,6 +390,8 @@ int goya_set_fixed_properties(struct hl_device *hdev) } prop->device_dma_offset_for_host_access = HOST_PHYS_BASE; + prop->host_base_address = HOST_PHYS_BASE; + prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE; prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; prop->dram_base_address = DRAM_PHYS_BASE; @@ -413,18 +415,19 @@ int goya_set_fixed_properties(struct hl_device *hdev) prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE; prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; + prop->device_mem_alloc_default_page_size = prop->dram_page_size; prop->dram_supports_virtual_memory = true; - prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT; - prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT; - prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT; - prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT; - prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT; - prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK; - prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK; - prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK; - prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK; - prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK; + prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT; + prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT; + prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT; + prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT; + prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT; + prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK; + prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK; + prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK; + prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK; + prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK; prop->dmmu.start_addr = VA_DDR_SPACE_START; prop->dmmu.end_addr = VA_DDR_SPACE_END; prop->dmmu.page_size = PAGE_SIZE_2MB; @@ -487,6 +490,8 @@ int goya_set_fixed_properties(struct hl_device *hdev) prop->set_max_power_on_device_init = true; + prop->dma_mask = 48; + return 0; } @@ -574,8 +579,6 @@ static int goya_init_iatu(struct hl_device *hdev) if (rc) goto done; - hdev->asic_funcs->set_dma_mask_from_fw(hdev); - /* Outbound Region 0 - Point to Host */ outbound_region.addr = 
HOST_PHYS_BASE; outbound_region.size = HOST_PHYS_SIZE; @@ -2479,9 +2482,6 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_r { u32 wait_timeout_ms; - dev_info(hdev->dev, - "Halting compute engines and disabling interrupts\n"); - if (hdev->pldm) wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC; else @@ -2825,12 +2825,12 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset) goya_set_pll_refclk(hdev); WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL); - dev_info(hdev->dev, + dev_dbg(hdev->dev, "Issued HARD reset command, going to wait %dms\n", reset_timeout_ms); } else { WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET); - dev_info(hdev->dev, + dev_dbg(hdev->dev, "Issued SOFT reset command, going to wait %dms\n", reset_timeout_ms); } @@ -3311,35 +3311,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr); } -static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir)) - return -ENOMEM; - - /* Shift to the device's base physical address of host memory */ - for_each_sg(sgl, sg, nents, i) - sg->dma_address += HOST_PHYS_BASE; - - return 0; -} - -static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - /* Cancel the device's base physical address of host memory */ - for_each_sg(sgl, sg, nents, i) - sg->dma_address -= HOST_PHYS_BASE; - - dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir); -} - u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt) { struct scatterlist *sg, *sg_next_iter; @@ -3349,8 +3320,7 @@ u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt) dma_desc_cnt = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - + for_each_sgtable_dma_sg(sgt, sg, count) { len = sg_dma_len(sg); addr = sg_dma_address(sg); @@ -3404,8 +3374,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev, list_add_tail(&userptr->job_node, parser->job_userptr_list); - rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, - userptr->sgt->nents, dir); + rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir); if (rc) { dev_err(hdev->dev, "failed to map sgt with DMA region\n"); goto unpin_memory; @@ -3869,7 +3838,7 @@ static int goya_patch_dma_packet(struct hl_device *hdev, sgt = userptr->sgt; dma_desc_cnt = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { + for_each_sgtable_dma_sg(sgt, sg, count) { len = sg_dma_len(sg); dma_addr = sg_dma_address(sg); @@ -4032,7 +4001,7 @@ static int goya_patch_cb(struct hl_device *hdev, static int goya_parse_cb_mmu(struct hl_device *hdev, struct hl_cs_parser *parser) { - u64 patched_cb_handle; + u64 handle; u32 patched_cb_size; struct hl_cb *user_cb; int rc; @@ -4045,9 +4014,9 @@ static int goya_parse_cb_mmu(struct hl_device *hdev, parser->patched_cb_size = parser->user_cb_size + sizeof(struct packet_msg_prot) * 2; - rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, parser->patched_cb_size, false, false, - &patched_cb_handle); + &handle); if (rc) { dev_err(hdev->dev, @@ -4056,13 +4025,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev, return rc; } - patched_cb_handle >>= PAGE_SHIFT; - parser->patched_cb = 
hl_cb_get(hdev, &hdev->kernel_cb_mgr, - (u32) patched_cb_handle); + parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); /* hl_cb_get should never fail here */ if (!parser->patched_cb) { - dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); rc = -EFAULT; goto out; } @@ -4102,8 +4068,7 @@ out: * cb_put will release it, but here we want to remove it from the * idr */ - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, - patched_cb_handle << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, handle); return rc; } @@ -4111,7 +4076,7 @@ out: static int goya_parse_cb_no_mmu(struct hl_device *hdev, struct hl_cs_parser *parser) { - u64 patched_cb_handle; + u64 handle; int rc; rc = goya_validate_cb(hdev, parser, false); @@ -4119,22 +4084,19 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev, if (rc) goto free_userptr; - rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, parser->patched_cb_size, false, false, - &patched_cb_handle); + &handle); if (rc) { dev_err(hdev->dev, "Failed to allocate patched CB for DMA CS %d\n", rc); goto free_userptr; } - patched_cb_handle >>= PAGE_SHIFT; - parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, - (u32) patched_cb_handle); + parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); /* hl_cb_get should never fail here */ if (!parser->patched_cb) { - dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); rc = -EFAULT; goto out; } @@ -4151,8 +4113,7 @@ out: * cb_put will release it, but here we want to remove it from the * idr */ - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, - patched_cb_handle << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, handle); free_userptr: if (rc) @@ -4259,224 +4220,7 @@ static void goya_clear_sm_regs(struct hl_device *hdev) i = RREG32(mmSYNC_MNGR_SOB_OBJ_0); } -/* - * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped - * address. - * - * @hdev: pointer to hl_device structure - * @addr: device or host mapped address - * @val: returned value - * - * In case of DDR address that is not mapped into the default aperture that - * the DDR bar exposes, the function will configure the iATU so that the DDR - * bar will be positioned at a base address that allows reading from the - * required address. 
Configuring the iATU during normal operation can - * lead to undefined behavior and therefore, should be done with extreme care - * - */ -static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, - bool user_address, u32 *val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 ddr_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - *val = RREG32(addr - CFG_BASE); - - } else if ((addr >= SRAM_BASE_ADDR) && - (addr < SRAM_BASE_ADDR + SRAM_SIZE)) { - - *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] + - (addr - SRAM_BASE_ADDR)); - - } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { - - u64 bar_base_addr = DRAM_PHYS_BASE + - (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); - if (ddr_bar_addr != U64_MAX) { - *val = readl(hdev->pcie_bar[DDR_BAR_ID] + - (addr - bar_base_addr)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, - ddr_bar_addr); - } - if (ddr_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE); - - } else { - rc = -EFAULT; - } - - return rc; -} - -/* - * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped - * address. - * - * @hdev: pointer to hl_device structure - * @addr: device or host mapped address - * @val: returned value - * - * In case of DDR address that is not mapped into the default aperture that - * the DDR bar exposes, the function will configure the iATU so that the DDR - * bar will be positioned at a base address that allows writing to the - * required address. 
Configuring the iATU during normal operation can - * lead to undefined behavior and therefore, should be done with extreme care - * - */ -static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, - bool user_address, u32 val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 ddr_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - WREG32(addr - CFG_BASE, val); - - } else if ((addr >= SRAM_BASE_ADDR) && - (addr < SRAM_BASE_ADDR + SRAM_SIZE)) { - - writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] + - (addr - SRAM_BASE_ADDR)); - - } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { - - u64 bar_base_addr = DRAM_PHYS_BASE + - (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); - if (ddr_bar_addr != U64_MAX) { - writel(val, hdev->pcie_bar[DDR_BAR_ID] + - (addr - bar_base_addr)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, - ddr_bar_addr); - } - if (ddr_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; - - } else { - rc = -EFAULT; - } - - return rc; -} - -static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, - bool user_address, u64 *val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 ddr_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - u32 val_l = RREG32(addr - CFG_BASE); - u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE); - - *val = (((u64) val_h) << 32) | val_l; - - } else if ((addr >= SRAM_BASE_ADDR) && - (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) { - - *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] + - (addr - SRAM_BASE_ADDR)); - - } else if (addr <= - DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { - - u64 bar_base_addr = DRAM_PHYS_BASE + - (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); - if (ddr_bar_addr != U64_MAX) { - *val = readq(hdev->pcie_bar[DDR_BAR_ID] + - (addr - bar_base_addr)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, - ddr_bar_addr); - } - if (ddr_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE); - - } else { - rc = -EFAULT; - } - - return rc; -} - -static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, - bool user_address, u64 val) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 ddr_bar_addr, host_phys_end; - int rc = 0; - - host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE; - - if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - WREG32(addr - CFG_BASE, lower_32_bits(val)); - WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val)); - - } else if ((addr >= SRAM_BASE_ADDR) && - (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) { - - writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] + - (addr - SRAM_BASE_ADDR)); - - } else if (addr <= - DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { - - u64 bar_base_addr = DRAM_PHYS_BASE + - (addr & ~(prop->dram_pci_bar_size - 0x1ull)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); - if (ddr_bar_addr != U64_MAX) { - writeq(val, 
hdev->pcie_bar[DDR_BAR_ID] + - (addr - bar_base_addr)); - - ddr_bar_addr = goya_set_ddr_bar_base(hdev, - ddr_bar_addr); - } - if (ddr_bar_addr == U64_MAX) - rc = -EIO; - - } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end && - user_address && !iommu_present(&pci_bus_type)) { - *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; - - } else { - rc = -EFAULT; - } - - return rc; -} - -static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, - void *blob_addr) +static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr) { dev_err(hdev->dev, "Reading via DMA is unimplemented yet\n"); return -EPERM; @@ -5101,7 +4845,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, release_cb: hl_cb_put(cb); - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); return rc; } @@ -5561,11 +5305,6 @@ static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group) } -static void goya_set_dma_mask_from_fw(struct hl_device *hdev) -{ - hdev->dma_mask = 48; -} - u64 goya_get_device_time(struct hl_device *hdev) { u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32; @@ -5678,6 +5417,22 @@ static u32 *goya_get_stream_master_qid_arr(void) return NULL; } +static void goya_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info) +{ + /* set 0 since multiple pages are not supported */ + info->page_order_bitmask = 0; +} + +static int goya_get_monitor_dump(struct hl_device *hdev, void *data) +{ + return -EOPNOTSUPP; +} + +static int goya_scrub_device_dram(struct hl_device *hdev, u64 val) +{ + return -EOPNOTSUPP; +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5696,24 +5451,21 @@ static const struct hl_asic_funcs goya_funcs = { .asic_dma_alloc_coherent = goya_dma_alloc_coherent, .asic_dma_free_coherent = goya_dma_free_coherent, .scrub_device_mem = goya_scrub_device_mem, + .scrub_device_dram = goya_scrub_device_dram, .get_int_queue_base = goya_get_int_queue_base, .test_queues = goya_test_queues, .asic_dma_pool_zalloc = goya_dma_pool_zalloc, .asic_dma_pool_free = goya_dma_pool_free, .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc, .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free, - .hl_dma_unmap_sg = goya_dma_unmap_sg, + .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable, .cs_parser = goya_cs_parser, - .asic_dma_map_sg = goya_dma_map_sg, + .asic_dma_map_sgtable = hl_dma_map_sgtable, .get_dma_desc_list_size = goya_get_dma_desc_list_size, .add_end_of_cb_packets = goya_add_end_of_cb_packets, .update_eq_ci = goya_update_eq_ci, .context_switch = goya_context_switch, .restore_phase_topology = goya_restore_phase_topology, - .debugfs_read32 = goya_debugfs_read32, - .debugfs_write32 = goya_debugfs_write32, - .debugfs_read64 = goya_debugfs_read64, - .debugfs_write64 = goya_debugfs_write64, .debugfs_read_dma = goya_debugfs_read_dma, .add_device_attr = goya_add_device_attr, .handle_eqe = goya_handle_eqe, @@ -5722,6 +5474,7 @@ static const struct hl_asic_funcs goya_funcs = { .write_pte = goya_write_pte, .mmu_invalidate_cache = goya_mmu_invalidate_cache, .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, + .mmu_prefetch_cache_range = NULL, .send_heartbeat = goya_send_heartbeat, .debug_coresight = goya_debug_coresight, .is_device_idle = goya_is_device_idle, @@ -5730,6 +5483,7 @@ static const struct hl_asic_funcs goya_funcs = { 
	.hw_queues_unlock = goya_hw_queues_unlock,
	.get_pci_id = goya_get_pci_id,
	.get_eeprom_data = goya_get_eeprom_data,
+	.get_monitor_dump = goya_get_monitor_dump,
	.send_cpu_message = goya_send_cpu_message,
	.pci_bars_map = goya_pci_bars_map,
	.init_iatu = goya_init_iatu,
@@ -5747,7 +5501,6 @@
	.gen_wait_cb = goya_gen_wait_cb,
	.reset_sob = goya_reset_sob,
	.reset_sob_group = goya_reset_sob_group,
-	.set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
	.get_device_time = goya_get_device_time,
	.collective_wait_init_cs = goya_collective_wait_init_cs,
	.collective_wait_create_jobs = goya_collective_wait_create_jobs,
@@ -5764,7 +5517,11 @@
	.get_sob_addr = &goya_get_sob_addr,
	.set_pci_memory_regions = goya_set_pci_memory_regions,
	.get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
-	.is_valid_dram_page_size = NULL
+	.is_valid_dram_page_size = NULL,
+	.mmu_get_real_page_size = hl_mmu_get_real_page_size,
+	.get_valid_dram_page_orders = goya_get_valid_dram_page_orders,
+	.access_dev_mem = hl_access_dev_mem,
+	.set_dram_bar_base = goya_set_ddr_bar_base,
};

/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 65668dac6a5f..38e44b6cf581 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -389,6 +389,14 @@ enum pq_init_status {
 *
 * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
 *       Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ *       Get a dump of the monitor registers from the CpuCP kernel.
+ *       The CPU will put the register dump in a buffer allocated by the
+ *       driver, whose address is passed via the CpuCP packet. In addition,
+ *       the host driver passes the maximum size the CpuCP is allowed to
+ *       write to the structure, to prevent data corruption in case of
+ *       mismatched driver/FW versions.
+ *       Relevant only to Gaudi.
 */

enum cpucp_packet_id {
@@ -439,6 +447,11 @@ enum cpucp_packet_id {
	CPUCP_PACKET_POWER_SET,			/* internal */
	CPUCP_PACKET_RESERVED,			/* not used */
	CPUCP_PACKET_ENGINE_CORE_ASID_SET,	/* internal */
+	CPUCP_PACKET_RESERVED2,			/* not used */
+	CPUCP_PACKET_RESERVED3,			/* not used */
+	CPUCP_PACKET_RESERVED4,			/* not used */
+	CPUCP_PACKET_RESERVED5,			/* not used */
+	CPUCP_PACKET_MONITOR_DUMP_GET,		/* debugfs */
};

#define CPUCP_PACKET_FENCE_VAL	0xFE8CE7A5
@@ -555,6 +568,12 @@ struct cpucp_array_data_packet {
	__le32 data[];
};

+enum cpucp_led_index {
+	CPUCP_LED0_INDEX = 0,
+	CPUCP_LED1_INDEX,
+	CPUCP_LED2_INDEX
+};
+
enum cpucp_packet_rc {
	cpucp_packet_success,
	cpucp_packet_invalid,
@@ -576,7 +595,10 @@ enum cpucp_temp_type {
	cpucp_temp_offset = 19,
	cpucp_temp_lowest = 21,
	cpucp_temp_highest = 22,
-	cpucp_temp_reset_history = 23
+	cpucp_temp_reset_history = 23,
+	cpucp_temp_warn = 24,
+	cpucp_temp_max_crit = 25,
+	cpucp_temp_max_warn = 26,
};

enum cpucp_in_attributes {
@@ -686,6 +708,7 @@ enum pll_index {
enum rl_index {
	TPC_RL = 0,
	MME_RL,
+	EDMA_RL,
};

enum pvt_index {
@@ -820,6 +843,7 @@ enum cpucp_serdes_type {
	TYPE_2_SERDES_TYPE,
	HLS1_SERDES_TYPE,
	HLS1H_SERDES_TYPE,
+	HLS2_SERDES_TYPE,
	UNKNOWN_SERDES_TYPE,
	MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
};
@@ -833,9 +857,28 @@ struct cpucp_nic_info {
	__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
	__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
	__le16 serdes_type; /* enum cpucp_serdes_type */
+	__le16 tx_swap_map[CPUCP_MAX_NICS];
	__u8 reserved[6];
};

+#define PAGE_DISCARD_MAX	64
+
+struct page_discard_info {
+	__u8 num_entries;
+	__u8 reserved[7];
+	__le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+	__le16 integer;
+	__le16 exp;
+};
+
/*
 * struct cpucp_nic_status - describes the status of a NIC port.
 * @port: NIC port index.
@@ -889,4 +932,29 @@ struct cpucp_hbm_row_replaced_rows_info {
	struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
};

+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * the structure follows sync manager block layout. relevant only to Gaudi.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */ +struct dcore_monitor_regs_data { + __le32 mon_pay_addrl[512]; + __le32 mon_pay_addrh[512]; + __le32 mon_pay_data[512]; + __le32 mon_arm[512]; + __le32 mon_status[512]; +}; + +/* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */ +struct cpucp_monitor_dump { + struct dcore_monitor_regs_data sync_mngr_w_s; + struct dcore_monitor_regs_data sync_mngr_e_s; + struct dcore_monitor_regs_data sync_mngr_w_n; + struct dcore_monitor_regs_data sync_mngr_e_n; +}; + #endif /* CPUCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h index 758f246627f8..cae8ac8bc5b1 100644 --- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h @@ -34,4 +34,14 @@ #define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */ +enum mmu_hop_num { + MMU_HOP0, + MMU_HOP1, + MMU_HOP2, + MMU_HOP3, + MMU_HOP4, + MMU_HOP5, + MMU_HOP_MAX, +}; + #endif /* INCLUDE_MMU_GENERAL_H_ */ diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index f21854ac5cc2..009239ad1d8a 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -68,40 +68,40 @@ void __init lkdtm_bugs_init(int *recur_param) recur_count = *recur_param; } -void lkdtm_PANIC(void) +static void lkdtm_PANIC(void) { panic("dumptest"); } -void lkdtm_BUG(void) +static void lkdtm_BUG(void) { BUG(); } static int warn_counter; -void lkdtm_WARNING(void) +static void lkdtm_WARNING(void) { WARN_ON(++warn_counter); } -void lkdtm_WARNING_MESSAGE(void) +static void lkdtm_WARNING_MESSAGE(void) { WARN(1, "Warning message trigger count: %d\n", ++warn_counter); } -void lkdtm_EXCEPTION(void) +static void lkdtm_EXCEPTION(void) { *((volatile int *) 0) = 0; } -void lkdtm_LOOP(void) +static void lkdtm_LOOP(void) { for (;;) ; } -void lkdtm_EXHAUST_STACK(void) +static void lkdtm_EXHAUST_STACK(void) { pr_info("Calling function with %lu frame size to depth %d ...\n", REC_STACK_SIZE, recur_count); @@ -115,7 +115,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack) } /* This should trip the stack canary, not corrupt the return address. */ -noinline void lkdtm_CORRUPT_STACK(void) +static noinline void lkdtm_CORRUPT_STACK(void) { /* Use default char array length that triggers stack protection. */ char data[8] __aligned(sizeof(void *)); @@ -125,7 +125,7 @@ noinline void lkdtm_CORRUPT_STACK(void) } /* Same as above but will only get a canary with -fstack-protector-strong */ -noinline void lkdtm_CORRUPT_STACK_STRONG(void) +static noinline void lkdtm_CORRUPT_STACK_STRONG(void) { union { unsigned short shorts[4]; @@ -139,7 +139,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void) static pid_t stack_pid; static unsigned long stack_addr; -void lkdtm_REPORT_STACK(void) +static void lkdtm_REPORT_STACK(void) { volatile uintptr_t magic; pid_t pid = task_pid_nr(current); @@ -222,7 +222,7 @@ static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack) } } -void lkdtm_REPORT_STACK_CANARY(void) +static void lkdtm_REPORT_STACK_CANARY(void) { /* Use default char array length that triggers stack protection. 
*/ char data[8] __aligned(sizeof(void *)) = { }; @@ -230,7 +230,7 @@ void lkdtm_REPORT_STACK_CANARY(void) __lkdtm_REPORT_STACK_CANARY((void *)&data); } -void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) +static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) { static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; u32 *p; @@ -245,21 +245,21 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n"); } -void lkdtm_SOFTLOCKUP(void) +static void lkdtm_SOFTLOCKUP(void) { preempt_disable(); for (;;) cpu_relax(); } -void lkdtm_HARDLOCKUP(void) +static void lkdtm_HARDLOCKUP(void) { local_irq_disable(); for (;;) cpu_relax(); } -void lkdtm_SPINLOCKUP(void) +static void lkdtm_SPINLOCKUP(void) { /* Must be called twice to trigger. */ spin_lock(&lock_me_up); @@ -267,7 +267,7 @@ void lkdtm_SPINLOCKUP(void) __release(&lock_me_up); } -void lkdtm_HUNG_TASK(void) +static void lkdtm_HUNG_TASK(void) { set_current_state(TASK_UNINTERRUPTIBLE); schedule(); @@ -276,7 +276,7 @@ void lkdtm_HUNG_TASK(void) volatile unsigned int huge = INT_MAX - 2; volatile unsigned int ignored; -void lkdtm_OVERFLOW_SIGNED(void) +static void lkdtm_OVERFLOW_SIGNED(void) { int value; @@ -291,7 +291,7 @@ void lkdtm_OVERFLOW_SIGNED(void) } -void lkdtm_OVERFLOW_UNSIGNED(void) +static void lkdtm_OVERFLOW_UNSIGNED(void) { unsigned int value; @@ -319,7 +319,7 @@ struct array_bounds { int three; }; -void lkdtm_ARRAY_BOUNDS(void) +static void lkdtm_ARRAY_BOUNDS(void) { struct array_bounds_flex_array *not_checked; struct array_bounds *checked; @@ -327,6 +327,11 @@ void lkdtm_ARRAY_BOUNDS(void) not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL); checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL); + if (!not_checked || !checked) { + kfree(not_checked); + kfree(checked); + return; + } pr_info("Array access within bounds ...\n"); /* For both, touch all bytes in the actual member size. */ @@ -346,10 +351,13 @@ void lkdtm_ARRAY_BOUNDS(void) kfree(not_checked); kfree(checked); pr_err("FAIL: survived array bounds overflow!\n"); - pr_expected_config(CONFIG_UBSAN_BOUNDS); + if (IS_ENABLED(CONFIG_UBSAN_BOUNDS)) + pr_expected_config(CONFIG_UBSAN_TRAP); + else + pr_expected_config(CONFIG_UBSAN_BOUNDS); } -void lkdtm_CORRUPT_LIST_ADD(void) +static void lkdtm_CORRUPT_LIST_ADD(void) { /* * Initially, an empty list via LIST_HEAD: @@ -389,7 +397,7 @@ void lkdtm_CORRUPT_LIST_ADD(void) } } -void lkdtm_CORRUPT_LIST_DEL(void) +static void lkdtm_CORRUPT_LIST_DEL(void) { LIST_HEAD(test_head); struct lkdtm_list item; @@ -417,7 +425,7 @@ void lkdtm_CORRUPT_LIST_DEL(void) } /* Test that VMAP_STACK is actually allocating with a leading guard page */ -void lkdtm_STACK_GUARD_PAGE_LEADING(void) +static void lkdtm_STACK_GUARD_PAGE_LEADING(void) { const unsigned char *stack = task_stack_page(current); const unsigned char *ptr = stack - 1; @@ -431,7 +439,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void) } /* Test that VMAP_STACK is actually allocating with a trailing guard page */ -void lkdtm_STACK_GUARD_PAGE_TRAILING(void) +static void lkdtm_STACK_GUARD_PAGE_TRAILING(void) { const unsigned char *stack = task_stack_page(current); const unsigned char *ptr = stack + THREAD_SIZE; @@ -444,7 +452,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void) pr_err("FAIL: accessed page after stack! 
(byte: %x)\n", byte); } -void lkdtm_UNSET_SMEP(void) +static void lkdtm_UNSET_SMEP(void) { #if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) #define MOV_CR4_DEPTH 64 @@ -510,7 +518,7 @@ void lkdtm_UNSET_SMEP(void) #endif } -void lkdtm_DOUBLE_FAULT(void) +static void lkdtm_DOUBLE_FAULT(void) { #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) /* @@ -558,7 +566,7 @@ static noinline void change_pac_parameters(void) } #endif -noinline void lkdtm_CORRUPT_PAC(void) +static noinline void lkdtm_CORRUPT_PAC(void) { #ifdef CONFIG_ARM64 #define CORRUPT_PAC_ITERATE 10 @@ -586,3 +594,37 @@ noinline void lkdtm_CORRUPT_PAC(void) pr_err("XFAIL: this test is arm64-only\n"); #endif } + +static struct crashtype crashtypes[] = { + CRASHTYPE(PANIC), + CRASHTYPE(BUG), + CRASHTYPE(WARNING), + CRASHTYPE(WARNING_MESSAGE), + CRASHTYPE(EXCEPTION), + CRASHTYPE(LOOP), + CRASHTYPE(EXHAUST_STACK), + CRASHTYPE(CORRUPT_STACK), + CRASHTYPE(CORRUPT_STACK_STRONG), + CRASHTYPE(REPORT_STACK), + CRASHTYPE(REPORT_STACK_CANARY), + CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), + CRASHTYPE(SOFTLOCKUP), + CRASHTYPE(HARDLOCKUP), + CRASHTYPE(SPINLOCKUP), + CRASHTYPE(HUNG_TASK), + CRASHTYPE(OVERFLOW_SIGNED), + CRASHTYPE(OVERFLOW_UNSIGNED), + CRASHTYPE(ARRAY_BOUNDS), + CRASHTYPE(CORRUPT_LIST_ADD), + CRASHTYPE(CORRUPT_LIST_DEL), + CRASHTYPE(STACK_GUARD_PAGE_LEADING), + CRASHTYPE(STACK_GUARD_PAGE_TRAILING), + CRASHTYPE(UNSET_SMEP), + CRASHTYPE(DOUBLE_FAULT), + CRASHTYPE(CORRUPT_PAC), +}; + +struct crashtype_category bugs_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c index c9aeddef1044..666a7f4bc137 100644 --- a/drivers/misc/lkdtm/cfi.c +++ b/drivers/misc/lkdtm/cfi.c @@ -3,6 +3,7 @@ * This is for all the tests relating directly to Control Flow Integrity. */ #include "lkdtm.h" +#include <asm/page.h> static int called_count; @@ -22,7 +23,7 @@ static noinline int lkdtm_increment_int(int *counter) /* * This tries to call an indirect function with a mismatched prototype. */ -void lkdtm_CFI_FORWARD_PROTO(void) +static void lkdtm_CFI_FORWARD_PROTO(void) { /* * Matches lkdtm_increment_void()'s prototype, but not @@ -41,3 +42,145 @@ void lkdtm_CFI_FORWARD_PROTO(void) pr_err("FAIL: survived mismatched prototype function call!\n"); pr_expected_config(CONFIG_CFI_CLANG); } + +/* + * This can stay local to LKDTM, as there should not be a production reason + * to disable PAC && SCS. + */ +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL +# ifdef CONFIG_ARM64_BTI_KERNEL +# define __no_pac "branch-protection=bti" +# else +# define __no_pac "branch-protection=none" +# endif +# define __no_ret_protection __noscs __attribute__((__target__(__no_pac))) +#else +# define __no_ret_protection __noscs +#endif + +#define no_pac_addr(addr) \ + ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET)) + +/* The ultimate ROP gadget. */ +static noinline __no_ret_protection +void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr) +{ + /* Use of volatile is to make sure final write isn't seen as a dead store. */ + unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1; + + /* Make sure we've found the right place on the stack before writing it. */ + if (no_pac_addr(*ret_addr) == expected) + *ret_addr = (addr); + else + /* Check architecture, stack layout, or compiler behavior... */ + pr_warn("Eek: return address mismatch! 
%px != %px\n", + *ret_addr, addr); +} + +static noinline +void set_return_addr(unsigned long *expected, unsigned long *addr) +{ + /* Use of volatile is to make sure final write isn't seen as a dead store. */ + unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1; + + /* Make sure we've found the right place on the stack before writing it. */ + if (no_pac_addr(*ret_addr) == expected) + *ret_addr = (addr); + else + /* Check architecture, stack layout, or compiler behavior... */ + pr_warn("Eek: return address mismatch! %px != %px\n", + *ret_addr, addr); +} + +static volatile int force_check; + +static void lkdtm_CFI_BACKWARD(void) +{ + /* Use calculated gotos to keep labels addressable. */ + void *labels[] = {0, &&normal, &&redirected, &&check_normal, &&check_redirected}; + + pr_info("Attempting unchecked stack return address redirection ...\n"); + + /* Always false */ + if (force_check) { + /* + * Prepare to call with NULLs to avoid parameters being treated as + * constants in -02. + */ + set_return_addr_unchecked(NULL, NULL); + set_return_addr(NULL, NULL); + if (force_check) + goto *labels[1]; + if (force_check) + goto *labels[2]; + if (force_check) + goto *labels[3]; + if (force_check) + goto *labels[4]; + return; + } + + /* + * Use fallthrough switch case to keep basic block ordering between + * set_return_addr*() and the label after it. + */ + switch (force_check) { + case 0: + set_return_addr_unchecked(&&normal, &&redirected); + fallthrough; + case 1: +normal: + /* Always true */ + if (!force_check) { + pr_err("FAIL: stack return address manipulation failed!\n"); + /* If we can't redirect "normally", we can't test mitigations. */ + return; + } + break; + default: +redirected: + pr_info("ok: redirected stack return address.\n"); + break; + } + + pr_info("Attempting checked stack return address redirection ...\n"); + + switch (force_check) { + case 0: + set_return_addr(&&check_normal, &&check_redirected); + fallthrough; + case 1: +check_normal: + /* Always true */ + if (!force_check) { + pr_info("ok: control flow unchanged.\n"); + return; + } + +check_redirected: + pr_err("FAIL: stack return address was redirected!\n"); + break; + } + + if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) { + pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL); + return; + } + if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) { + pr_expected_config(CONFIG_SHADOW_CALL_STACK); + return; + } + pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n", + lkdtm_kernel_info, + "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK"); +} + +static struct crashtype crashtypes[] = { + CRASHTYPE(CFI_FORWARD_PROTO), + CRASHTYPE(CFI_BACKWARD), +}; + +struct crashtype_category cfi_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index e2228b6fc09b..b4712ff196b4 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -86,109 +86,21 @@ static struct crashpoint crashpoints[] = { #endif }; - -/* Crash types. */ -struct crashtype { - const char *name; - void (*func)(void); -}; - -#define CRASHTYPE(_name) \ - { \ - .name = __stringify(_name), \ - .func = lkdtm_ ## _name, \ - } - -/* Define the possible types of crashes that can be triggered. 
*/ -static const struct crashtype crashtypes[] = { - CRASHTYPE(PANIC), - CRASHTYPE(BUG), - CRASHTYPE(WARNING), - CRASHTYPE(WARNING_MESSAGE), - CRASHTYPE(EXCEPTION), - CRASHTYPE(LOOP), - CRASHTYPE(EXHAUST_STACK), - CRASHTYPE(CORRUPT_STACK), - CRASHTYPE(CORRUPT_STACK_STRONG), - CRASHTYPE(REPORT_STACK), - CRASHTYPE(REPORT_STACK_CANARY), - CRASHTYPE(CORRUPT_LIST_ADD), - CRASHTYPE(CORRUPT_LIST_DEL), - CRASHTYPE(STACK_GUARD_PAGE_LEADING), - CRASHTYPE(STACK_GUARD_PAGE_TRAILING), - CRASHTYPE(UNSET_SMEP), - CRASHTYPE(CORRUPT_PAC), - CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), - CRASHTYPE(SLAB_LINEAR_OVERFLOW), - CRASHTYPE(VMALLOC_LINEAR_OVERFLOW), - CRASHTYPE(WRITE_AFTER_FREE), - CRASHTYPE(READ_AFTER_FREE), - CRASHTYPE(WRITE_BUDDY_AFTER_FREE), - CRASHTYPE(READ_BUDDY_AFTER_FREE), - CRASHTYPE(SLAB_INIT_ON_ALLOC), - CRASHTYPE(BUDDY_INIT_ON_ALLOC), - CRASHTYPE(SLAB_FREE_DOUBLE), - CRASHTYPE(SLAB_FREE_CROSS), - CRASHTYPE(SLAB_FREE_PAGE), - CRASHTYPE(SOFTLOCKUP), - CRASHTYPE(HARDLOCKUP), - CRASHTYPE(SPINLOCKUP), - CRASHTYPE(HUNG_TASK), - CRASHTYPE(OVERFLOW_SIGNED), - CRASHTYPE(OVERFLOW_UNSIGNED), - CRASHTYPE(ARRAY_BOUNDS), - CRASHTYPE(EXEC_DATA), - CRASHTYPE(EXEC_STACK), - CRASHTYPE(EXEC_KMALLOC), - CRASHTYPE(EXEC_VMALLOC), - CRASHTYPE(EXEC_RODATA), - CRASHTYPE(EXEC_USERSPACE), - CRASHTYPE(EXEC_NULL), - CRASHTYPE(ACCESS_USERSPACE), - CRASHTYPE(ACCESS_NULL), - CRASHTYPE(WRITE_RO), - CRASHTYPE(WRITE_RO_AFTER_INIT), - CRASHTYPE(WRITE_KERN), - CRASHTYPE(WRITE_OPD), - CRASHTYPE(REFCOUNT_INC_OVERFLOW), - CRASHTYPE(REFCOUNT_ADD_OVERFLOW), - CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW), - CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW), - CRASHTYPE(REFCOUNT_DEC_ZERO), - CRASHTYPE(REFCOUNT_DEC_NEGATIVE), - CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE), - CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE), - CRASHTYPE(REFCOUNT_INC_ZERO), - CRASHTYPE(REFCOUNT_ADD_ZERO), - CRASHTYPE(REFCOUNT_INC_SATURATED), - CRASHTYPE(REFCOUNT_DEC_SATURATED), - CRASHTYPE(REFCOUNT_ADD_SATURATED), - CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED), - CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED), - CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED), - CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED), - CRASHTYPE(REFCOUNT_TIMING), - CRASHTYPE(ATOMIC_TIMING), - CRASHTYPE(USERCOPY_HEAP_SIZE_TO), - CRASHTYPE(USERCOPY_HEAP_SIZE_FROM), - CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO), - CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM), - CRASHTYPE(USERCOPY_STACK_FRAME_TO), - CRASHTYPE(USERCOPY_STACK_FRAME_FROM), - CRASHTYPE(USERCOPY_STACK_BEYOND), - CRASHTYPE(USERCOPY_KERNEL), - CRASHTYPE(STACKLEAK_ERASING), - CRASHTYPE(CFI_FORWARD_PROTO), - CRASHTYPE(FORTIFIED_OBJECT), - CRASHTYPE(FORTIFIED_SUBOBJECT), - CRASHTYPE(FORTIFIED_STRSCPY), - CRASHTYPE(DOUBLE_FAULT), +/* List of possible types for crashes that can be triggered. */ +static const struct crashtype_category *crashtype_categories[] = { + &bugs_crashtypes, + &heap_crashtypes, + &perms_crashtypes, + &refcount_crashtypes, + &usercopy_crashtypes, + &stackleak_crashtypes, + &cfi_crashtypes, + &fortify_crashtypes, #ifdef CONFIG_PPC_64S_HASH_MMU - CRASHTYPE(PPC_SLB_MULTIHIT), + &powerpc_crashtypes, #endif }; - /* Global kprobe entry and crashtype. 
*/ static struct kprobe *lkdtm_kprobe; static struct crashpoint *lkdtm_crashpoint; @@ -223,11 +135,16 @@ char *lkdtm_kernel_info; /* Return the crashtype number or NULL if the name is invalid */ static const struct crashtype *find_crashtype(const char *name) { - int i; + int cat, idx; + + for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) { + for (idx = 0; idx < crashtype_categories[cat]->len; idx++) { + struct crashtype *crashtype; - for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { - if (!strcmp(name, crashtypes[i].name)) - return &crashtypes[i]; + crashtype = &crashtype_categories[cat]->crashtypes[idx]; + if (!strcmp(name, crashtype->name)) + return crashtype; + } } return NULL; @@ -347,17 +264,24 @@ static ssize_t lkdtm_debugfs_entry(struct file *f, static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, size_t count, loff_t *off) { + int n, cat, idx; + ssize_t out; char *buf; - int i, n, out; buf = (char *)__get_free_page(GFP_KERNEL); if (buf == NULL) return -ENOMEM; n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n"); - for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { - n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", - crashtypes[i].name); + + for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) { + for (idx = 0; idx < crashtype_categories[cat]->len; idx++) { + struct crashtype *crashtype; + + crashtype = &crashtype_categories[cat]->crashtypes[idx]; + n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", + crashtype->name); + } } buf[n] = '\0'; diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index ab33bb5e2e7a..080293fa3c52 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -10,7 +10,7 @@ static volatile int fortify_scratch_space; -void lkdtm_FORTIFIED_OBJECT(void) +static void lkdtm_FORTIFIED_OBJECT(void) { struct target { char a[10]; @@ -31,7 +31,7 @@ void lkdtm_FORTIFIED_OBJECT(void) pr_expected_config(CONFIG_FORTIFY_SOURCE); } -void lkdtm_FORTIFIED_SUBOBJECT(void) +static void lkdtm_FORTIFIED_SUBOBJECT(void) { struct target { char a[10]; @@ -67,7 +67,7 @@ void lkdtm_FORTIFIED_SUBOBJECT(void) * strscpy and generate a panic because there is a write overflow (i.e. src * length is greater than dst length). */ -void lkdtm_FORTIFIED_STRSCPY(void) +static void lkdtm_FORTIFIED_STRSCPY(void) { char *src; char dst[5]; @@ -134,3 +134,14 @@ void lkdtm_FORTIFIED_STRSCPY(void) kfree(src); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(FORTIFIED_OBJECT), + CRASHTYPE(FORTIFIED_SUBOBJECT), + CRASHTYPE(FORTIFIED_STRSCPY), +}; + +struct crashtype_category fortify_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c index 8a92f5a800fa..62516078a619 100644 --- a/drivers/misc/lkdtm/heap.c +++ b/drivers/misc/lkdtm/heap.c @@ -22,8 +22,11 @@ static volatile int __offset = 1; /* * If there aren't guard pages, it's likely that a consecutive allocation will * let us overflow into the second allocation without overwriting something real. + * + * This should always be caught because there is an unconditional unmapped + * page after vmap allocations. */ -void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) +static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) { char *one, *two; @@ -41,8 +44,11 @@ void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) * This tries to stay within the next largest power-of-2 kmalloc cache * to avoid actually overwriting anything important if it's not detected * correctly. 
+ * + * This should get caught by either memory tagging, KASan, or by using + * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y). */ -void lkdtm_SLAB_LINEAR_OVERFLOW(void) +static void lkdtm_SLAB_LINEAR_OVERFLOW(void) { size_t len = 1020; u32 *data = kmalloc(len, GFP_KERNEL); @@ -50,11 +56,12 @@ void lkdtm_SLAB_LINEAR_OVERFLOW(void) return; pr_info("Attempting slab linear overflow ...\n"); + OPTIMIZER_HIDE_VAR(data); data[1024 / sizeof(u32)] = 0x12345678; kfree(data); } -void lkdtm_WRITE_AFTER_FREE(void) +static void lkdtm_WRITE_AFTER_FREE(void) { int *base, *again; size_t len = 1024; @@ -80,7 +87,7 @@ void lkdtm_WRITE_AFTER_FREE(void) pr_info("Hmm, didn't get the same memory range.\n"); } -void lkdtm_READ_AFTER_FREE(void) +static void lkdtm_READ_AFTER_FREE(void) { int *base, *val, saw; size_t len = 1024; @@ -124,7 +131,7 @@ void lkdtm_READ_AFTER_FREE(void) kfree(val); } -void lkdtm_WRITE_BUDDY_AFTER_FREE(void) +static void lkdtm_WRITE_BUDDY_AFTER_FREE(void) { unsigned long p = __get_free_page(GFP_KERNEL); if (!p) { @@ -144,7 +151,7 @@ void lkdtm_WRITE_BUDDY_AFTER_FREE(void) schedule(); } -void lkdtm_READ_BUDDY_AFTER_FREE(void) +static void lkdtm_READ_BUDDY_AFTER_FREE(void) { unsigned long p = __get_free_page(GFP_KERNEL); int saw, *val; @@ -181,7 +188,7 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void) kfree(val); } -void lkdtm_SLAB_INIT_ON_ALLOC(void) +static void lkdtm_SLAB_INIT_ON_ALLOC(void) { u8 *first; u8 *val; @@ -213,7 +220,7 @@ void lkdtm_SLAB_INIT_ON_ALLOC(void) kfree(val); } -void lkdtm_BUDDY_INIT_ON_ALLOC(void) +static void lkdtm_BUDDY_INIT_ON_ALLOC(void) { u8 *first; u8 *val; @@ -246,7 +253,7 @@ void lkdtm_BUDDY_INIT_ON_ALLOC(void) free_page((unsigned long)val); } -void lkdtm_SLAB_FREE_DOUBLE(void) +static void lkdtm_SLAB_FREE_DOUBLE(void) { int *val; @@ -263,7 +270,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void) kmem_cache_free(double_free_cache, val); } -void lkdtm_SLAB_FREE_CROSS(void) +static void lkdtm_SLAB_FREE_CROSS(void) { int *val; @@ -279,7 +286,7 @@ void lkdtm_SLAB_FREE_CROSS(void) kmem_cache_free(b_cache, val); } -void lkdtm_SLAB_FREE_PAGE(void) +static void lkdtm_SLAB_FREE_PAGE(void) { unsigned long p = __get_free_page(GFP_KERNEL); @@ -313,3 +320,22 @@ void __exit lkdtm_heap_exit(void) kmem_cache_destroy(a_cache); kmem_cache_destroy(b_cache); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(SLAB_LINEAR_OVERFLOW), + CRASHTYPE(VMALLOC_LINEAR_OVERFLOW), + CRASHTYPE(WRITE_AFTER_FREE), + CRASHTYPE(READ_AFTER_FREE), + CRASHTYPE(WRITE_BUDDY_AFTER_FREE), + CRASHTYPE(READ_BUDDY_AFTER_FREE), + CRASHTYPE(SLAB_INIT_ON_ALLOC), + CRASHTYPE(BUDDY_INIT_ON_ALLOC), + CRASHTYPE(SLAB_FREE_DOUBLE), + CRASHTYPE(SLAB_FREE_CROSS), + CRASHTYPE(SLAB_FREE_PAGE), +}; + +struct crashtype_category heap_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 305fc2ec3f25..015e0484026b 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -9,19 +9,19 @@ extern char *lkdtm_kernel_info; #define pr_expected_config(kconfig) \ -{ \ +do { \ if (IS_ENABLED(kconfig)) \ pr_err("Unexpected! 
This %s was built with " #kconfig "=y\n", \ lkdtm_kernel_info); \ else \ pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \ lkdtm_kernel_info); \ -} +} while (0) #ifndef MODULE int lkdtm_check_bool_cmdline(const char *param); #define pr_expected_config_param(kconfig, param) \ -{ \ +do { \ if (IS_ENABLED(kconfig)) { \ switch (lkdtm_check_bool_cmdline(param)) { \ case 0: \ @@ -52,119 +52,49 @@ int lkdtm_check_bool_cmdline(const char *param); break; \ } \ } \ -} +} while (0) #else #define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig) #endif -/* bugs.c */ +/* Crash types. */ +struct crashtype { + const char *name; + void (*func)(void); +}; + +#define CRASHTYPE(_name) \ + { \ + .name = __stringify(_name), \ + .func = lkdtm_ ## _name, \ + } + +/* Category's collection of crashtypes. */ +struct crashtype_category { + struct crashtype *crashtypes; + size_t len; +}; + +/* Each category's crashtypes list. */ +extern struct crashtype_category bugs_crashtypes; +extern struct crashtype_category heap_crashtypes; +extern struct crashtype_category perms_crashtypes; +extern struct crashtype_category refcount_crashtypes; +extern struct crashtype_category usercopy_crashtypes; +extern struct crashtype_category stackleak_crashtypes; +extern struct crashtype_category cfi_crashtypes; +extern struct crashtype_category fortify_crashtypes; +extern struct crashtype_category powerpc_crashtypes; + +/* Each category's init/exit routines. */ void __init lkdtm_bugs_init(int *recur_param); -void lkdtm_PANIC(void); -void lkdtm_BUG(void); -void lkdtm_WARNING(void); -void lkdtm_WARNING_MESSAGE(void); -void lkdtm_EXCEPTION(void); -void lkdtm_LOOP(void); -void lkdtm_EXHAUST_STACK(void); -void lkdtm_CORRUPT_STACK(void); -void lkdtm_CORRUPT_STACK_STRONG(void); -void lkdtm_REPORT_STACK(void); -void lkdtm_REPORT_STACK_CANARY(void); -void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void); -void lkdtm_SOFTLOCKUP(void); -void lkdtm_HARDLOCKUP(void); -void lkdtm_SPINLOCKUP(void); -void lkdtm_HUNG_TASK(void); -void lkdtm_OVERFLOW_SIGNED(void); -void lkdtm_OVERFLOW_UNSIGNED(void); -void lkdtm_ARRAY_BOUNDS(void); -void lkdtm_CORRUPT_LIST_ADD(void); -void lkdtm_CORRUPT_LIST_DEL(void); -void lkdtm_STACK_GUARD_PAGE_LEADING(void); -void lkdtm_STACK_GUARD_PAGE_TRAILING(void); -void lkdtm_UNSET_SMEP(void); -void lkdtm_DOUBLE_FAULT(void); -void lkdtm_CORRUPT_PAC(void); - -/* heap.c */ void __init lkdtm_heap_init(void); void __exit lkdtm_heap_exit(void); -void lkdtm_VMALLOC_LINEAR_OVERFLOW(void); -void lkdtm_SLAB_LINEAR_OVERFLOW(void); -void lkdtm_WRITE_AFTER_FREE(void); -void lkdtm_READ_AFTER_FREE(void); -void lkdtm_WRITE_BUDDY_AFTER_FREE(void); -void lkdtm_READ_BUDDY_AFTER_FREE(void); -void lkdtm_SLAB_INIT_ON_ALLOC(void); -void lkdtm_BUDDY_INIT_ON_ALLOC(void); -void lkdtm_SLAB_FREE_DOUBLE(void); -void lkdtm_SLAB_FREE_CROSS(void); -void lkdtm_SLAB_FREE_PAGE(void); - -/* perms.c */ void __init lkdtm_perms_init(void); -void lkdtm_WRITE_RO(void); -void lkdtm_WRITE_RO_AFTER_INIT(void); -void lkdtm_WRITE_KERN(void); -void lkdtm_WRITE_OPD(void); -void lkdtm_EXEC_DATA(void); -void lkdtm_EXEC_STACK(void); -void lkdtm_EXEC_KMALLOC(void); -void lkdtm_EXEC_VMALLOC(void); -void lkdtm_EXEC_RODATA(void); -void lkdtm_EXEC_USERSPACE(void); -void lkdtm_EXEC_NULL(void); -void lkdtm_ACCESS_USERSPACE(void); -void lkdtm_ACCESS_NULL(void); - -/* refcount.c */ -void lkdtm_REFCOUNT_INC_OVERFLOW(void); -void lkdtm_REFCOUNT_ADD_OVERFLOW(void); -void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void); -void 
lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void); -void lkdtm_REFCOUNT_DEC_ZERO(void); -void lkdtm_REFCOUNT_DEC_NEGATIVE(void); -void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void); -void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void); -void lkdtm_REFCOUNT_INC_ZERO(void); -void lkdtm_REFCOUNT_ADD_ZERO(void); -void lkdtm_REFCOUNT_INC_SATURATED(void); -void lkdtm_REFCOUNT_DEC_SATURATED(void); -void lkdtm_REFCOUNT_ADD_SATURATED(void); -void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void); -void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void); -void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void); -void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void); -void lkdtm_REFCOUNT_TIMING(void); -void lkdtm_ATOMIC_TIMING(void); - -/* rodata.c */ -void lkdtm_rodata_do_nothing(void); - -/* usercopy.c */ void __init lkdtm_usercopy_init(void); void __exit lkdtm_usercopy_exit(void); -void lkdtm_USERCOPY_HEAP_SIZE_TO(void); -void lkdtm_USERCOPY_HEAP_SIZE_FROM(void); -void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void); -void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void); -void lkdtm_USERCOPY_STACK_FRAME_TO(void); -void lkdtm_USERCOPY_STACK_FRAME_FROM(void); -void lkdtm_USERCOPY_STACK_BEYOND(void); -void lkdtm_USERCOPY_KERNEL(void); - -/* stackleak.c */ -void lkdtm_STACKLEAK_ERASING(void); - -/* cfi.c */ -void lkdtm_CFI_FORWARD_PROTO(void); -/* fortify.c */ -void lkdtm_FORTIFIED_OBJECT(void); -void lkdtm_FORTIFIED_SUBOBJECT(void); -void lkdtm_FORTIFIED_STRSCPY(void); - -/* powerpc.c */ -void lkdtm_PPC_SLB_MULTIHIT(void); +/* Special declaration for function-in-rodata. */ +void lkdtm_rodata_do_nothing(void); #endif diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index 2c6aba3ff32b..b93404d65650 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -103,7 +103,7 @@ static void execute_user_location(void *dst) pr_err("FAIL: func returned\n"); } -void lkdtm_WRITE_RO(void) +static void lkdtm_WRITE_RO(void) { /* Explicitly cast away "const" for the test and make volatile. 
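/*
 * Aside: the volatile in lkdtm_WRITE_RO() above is load-bearing -- without
 * it the compiler may constant-fold or delete a store to a known-const
 * object, and the test would "survive" without ever touching the read-only
 * page. A minimal sketch of the cast-away, illustrative rather than the
 * driver's exact code:
 */
static const unsigned long rodata_value = 0xAA55AA55;

static void write_ro_sketch(void)
{
	/* drop const, keep volatile so the faulting store is really emitted */
	volatile unsigned long *ptr = (unsigned long *)&rodata_value;

	*ptr ^= 0xabcd1234;	/* expected to fault on a protected kernel */
}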
*/ volatile unsigned long *ptr = (unsigned long *)&rodata; @@ -113,7 +113,7 @@ void lkdtm_WRITE_RO(void) pr_err("FAIL: survived bad write\n"); } -void lkdtm_WRITE_RO_AFTER_INIT(void) +static void lkdtm_WRITE_RO_AFTER_INIT(void) { volatile unsigned long *ptr = &ro_after_init; @@ -132,7 +132,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void) pr_err("FAIL: survived bad write\n"); } -void lkdtm_WRITE_KERN(void) +static void lkdtm_WRITE_KERN(void) { size_t size; volatile unsigned char *ptr; @@ -149,7 +149,7 @@ void lkdtm_WRITE_KERN(void) do_overwritten(); } -void lkdtm_WRITE_OPD(void) +static void lkdtm_WRITE_OPD(void) { size_t size = sizeof(func_desc_t); void (*func)(void) = do_nothing; @@ -166,38 +166,38 @@ void lkdtm_WRITE_OPD(void) func(); } -void lkdtm_EXEC_DATA(void) +static void lkdtm_EXEC_DATA(void) { execute_location(data_area, CODE_WRITE); } -void lkdtm_EXEC_STACK(void) +static void lkdtm_EXEC_STACK(void) { u8 stack_area[EXEC_SIZE]; execute_location(stack_area, CODE_WRITE); } -void lkdtm_EXEC_KMALLOC(void) +static void lkdtm_EXEC_KMALLOC(void) { u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL); execute_location(kmalloc_area, CODE_WRITE); kfree(kmalloc_area); } -void lkdtm_EXEC_VMALLOC(void) +static void lkdtm_EXEC_VMALLOC(void) { u32 *vmalloc_area = vmalloc(EXEC_SIZE); execute_location(vmalloc_area, CODE_WRITE); vfree(vmalloc_area); } -void lkdtm_EXEC_RODATA(void) +static void lkdtm_EXEC_RODATA(void) { execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing), CODE_AS_IS); } -void lkdtm_EXEC_USERSPACE(void) +static void lkdtm_EXEC_USERSPACE(void) { unsigned long user_addr; @@ -212,12 +212,12 @@ void lkdtm_EXEC_USERSPACE(void) vm_munmap(user_addr, PAGE_SIZE); } -void lkdtm_EXEC_NULL(void) +static void lkdtm_EXEC_NULL(void) { execute_location(NULL, CODE_AS_IS); } -void lkdtm_ACCESS_USERSPACE(void) +static void lkdtm_ACCESS_USERSPACE(void) { unsigned long user_addr, tmp = 0; unsigned long *ptr; @@ -250,7 +250,7 @@ void lkdtm_ACCESS_USERSPACE(void) vm_munmap(user_addr, PAGE_SIZE); } -void lkdtm_ACCESS_NULL(void) +static void lkdtm_ACCESS_NULL(void) { unsigned long tmp; volatile unsigned long *ptr = (unsigned long *)NULL; @@ -270,3 +270,24 @@ void __init lkdtm_perms_init(void) /* Make sure we can write to __ro_after_init values during __init */ ro_after_init |= 0xAA; } + +static struct crashtype crashtypes[] = { + CRASHTYPE(WRITE_RO), + CRASHTYPE(WRITE_RO_AFTER_INIT), + CRASHTYPE(WRITE_KERN), + CRASHTYPE(WRITE_OPD), + CRASHTYPE(EXEC_DATA), + CRASHTYPE(EXEC_STACK), + CRASHTYPE(EXEC_KMALLOC), + CRASHTYPE(EXEC_VMALLOC), + CRASHTYPE(EXEC_RODATA), + CRASHTYPE(EXEC_USERSPACE), + CRASHTYPE(EXEC_NULL), + CRASHTYPE(ACCESS_USERSPACE), + CRASHTYPE(ACCESS_NULL), +}; + +struct crashtype_category perms_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c index 077c9f9ed8d0..be385449911a 100644 --- a/drivers/misc/lkdtm/powerpc.c +++ b/drivers/misc/lkdtm/powerpc.c @@ -100,7 +100,7 @@ static void insert_dup_slb_entry_0(void) preempt_enable(); } -void lkdtm_PPC_SLB_MULTIHIT(void) +static void lkdtm_PPC_SLB_MULTIHIT(void) { if (!radix_enabled()) { pr_info("Injecting SLB multihit errors\n"); @@ -118,3 +118,12 @@ void lkdtm_PPC_SLB_MULTIHIT(void) pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n"); } } + +static struct crashtype crashtypes[] = { + CRASHTYPE(PPC_SLB_MULTIHIT), +}; + +struct crashtype_category powerpc_crashtypes = { + .crashtypes = crashtypes, + .len = 
ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c index de7c5ab528d9..5cd488f54cfa 100644 --- a/drivers/misc/lkdtm/refcount.c +++ b/drivers/misc/lkdtm/refcount.c @@ -24,7 +24,7 @@ static void overflow_check(refcount_t *ref) * A refcount_inc() above the maximum value of the refcount implementation, * should at least saturate, and at most also WARN. */ -void lkdtm_REFCOUNT_INC_OVERFLOW(void) +static void lkdtm_REFCOUNT_INC_OVERFLOW(void) { refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1); @@ -40,7 +40,7 @@ void lkdtm_REFCOUNT_INC_OVERFLOW(void) } /* refcount_add() should behave just like refcount_inc() above. */ -void lkdtm_REFCOUNT_ADD_OVERFLOW(void) +static void lkdtm_REFCOUNT_ADD_OVERFLOW(void) { refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1); @@ -58,7 +58,7 @@ void lkdtm_REFCOUNT_ADD_OVERFLOW(void) } /* refcount_inc_not_zero() should behave just like refcount_inc() above. */ -void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void) +static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void) { refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX); @@ -70,7 +70,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void) } /* refcount_add_not_zero() should behave just like refcount_inc() above. */ -void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void) +static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void) { refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX); @@ -103,7 +103,7 @@ static void check_zero(refcount_t *ref) * zero it should either saturate (when inc-from-zero isn't protected) * or stay at zero (when inc-from-zero is protected) and should WARN for both. */ -void lkdtm_REFCOUNT_DEC_ZERO(void) +static void lkdtm_REFCOUNT_DEC_ZERO(void) { refcount_t zero = REFCOUNT_INIT(2); @@ -142,7 +142,7 @@ static void check_negative(refcount_t *ref, int start) } /* A refcount_dec() going negative should saturate and may WARN. */ -void lkdtm_REFCOUNT_DEC_NEGATIVE(void) +static void lkdtm_REFCOUNT_DEC_NEGATIVE(void) { refcount_t neg = REFCOUNT_INIT(0); @@ -156,7 +156,7 @@ void lkdtm_REFCOUNT_DEC_NEGATIVE(void) * A refcount_dec_and_test() should act like refcount_dec() above when * going negative. */ -void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void) +static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void) { refcount_t neg = REFCOUNT_INIT(0); @@ -171,7 +171,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void) * A refcount_sub_and_test() should act like refcount_dec_and_test() * above when going negative. */ -void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void) +static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void) { refcount_t neg = REFCOUNT_INIT(3); @@ -203,7 +203,7 @@ static void check_from_zero(refcount_t *ref) /* * A refcount_inc() from zero should pin to zero or saturate and may WARN. */ -void lkdtm_REFCOUNT_INC_ZERO(void) +static void lkdtm_REFCOUNT_INC_ZERO(void) { refcount_t zero = REFCOUNT_INIT(0); @@ -228,7 +228,7 @@ void lkdtm_REFCOUNT_INC_ZERO(void) * A refcount_add() should act like refcount_inc() above when starting * at zero. */ -void lkdtm_REFCOUNT_ADD_ZERO(void) +static void lkdtm_REFCOUNT_ADD_ZERO(void) { refcount_t zero = REFCOUNT_INIT(0); @@ -267,7 +267,7 @@ static void check_saturated(refcount_t *ref) * A refcount_inc() from a saturated value should at most warn about * being saturated already. */ -void lkdtm_REFCOUNT_INC_SATURATED(void) +static void lkdtm_REFCOUNT_INC_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -278,7 +278,7 @@ void lkdtm_REFCOUNT_INC_SATURATED(void) } /* Should act like refcount_inc() above from saturated. 
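/*
 * Aside: an illustrative model of the saturation contract the refcount
 * tests above probe. This is not the kernel's refcount_t implementation,
 * only its observable behavior: an operation that would overflow or go
 * negative pins the counter at a saturation value, and further operations
 * on a saturated counter are no-ops.
 */
#include <stdio.h>
#include <limits.h>

#define REF_MAX       INT_MAX
#define REF_SATURATED (INT_MIN / 2)	/* the kernel uses the same value */

static void ref_inc(int *ref)
{
	if (*ref == REF_SATURATED)
		return;			/* already saturated: stay put */
	if (*ref == REF_MAX)
		*ref = REF_SATURATED;	/* would overflow: pin, don't wrap */
	else
		(*ref)++;
}

int main(void)
{
	int ref = REF_MAX - 1;

	ref_inc(&ref);	/* reaches REF_MAX */
	ref_inc(&ref);	/* saturates instead of wrapping to INT_MIN */
	ref_inc(&ref);	/* no-op */
	printf("%s\n", ref == REF_SATURATED ? "saturated" : "counting");
	return 0;
}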
*/ -void lkdtm_REFCOUNT_DEC_SATURATED(void) +static void lkdtm_REFCOUNT_DEC_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -289,7 +289,7 @@ void lkdtm_REFCOUNT_DEC_SATURATED(void) } /* Should act like refcount_inc() above from saturated. */ -void lkdtm_REFCOUNT_ADD_SATURATED(void) +static void lkdtm_REFCOUNT_ADD_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -300,7 +300,7 @@ void lkdtm_REFCOUNT_ADD_SATURATED(void) } /* Should act like refcount_inc() above from saturated. */ -void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void) +static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -312,7 +312,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void) } /* Should act like refcount_inc() above from saturated. */ -void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void) +static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -324,7 +324,7 @@ void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void) } /* Should act like refcount_inc() above from saturated. */ -void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void) +static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -336,7 +336,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void) } /* Should act like refcount_inc() above from saturated. */ -void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void) +static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void) { refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); @@ -348,7 +348,7 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void) } /* Used to time the existing atomic_t when used for reference counting */ -void lkdtm_ATOMIC_TIMING(void) +static void lkdtm_ATOMIC_TIMING(void) { unsigned int i; atomic_t count = ATOMIC_INIT(1); @@ -373,7 +373,7 @@ void lkdtm_ATOMIC_TIMING(void) * cd /sys/kernel/debug/provoke-crash * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT */ -void lkdtm_REFCOUNT_TIMING(void) +static void lkdtm_REFCOUNT_TIMING(void) { unsigned int i; refcount_t count = REFCOUNT_INIT(1); @@ -390,3 +390,30 @@ void lkdtm_REFCOUNT_TIMING(void) else pr_info("refcount timing: done\n"); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(REFCOUNT_INC_OVERFLOW), + CRASHTYPE(REFCOUNT_ADD_OVERFLOW), + CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW), + CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW), + CRASHTYPE(REFCOUNT_DEC_ZERO), + CRASHTYPE(REFCOUNT_DEC_NEGATIVE), + CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE), + CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE), + CRASHTYPE(REFCOUNT_INC_ZERO), + CRASHTYPE(REFCOUNT_ADD_ZERO), + CRASHTYPE(REFCOUNT_INC_SATURATED), + CRASHTYPE(REFCOUNT_DEC_SATURATED), + CRASHTYPE(REFCOUNT_ADD_SATURATED), + CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED), + CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED), + CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED), + CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED), + CRASHTYPE(ATOMIC_TIMING), + CRASHTYPE(REFCOUNT_TIMING), +}; + +struct crashtype_category refcount_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c index 82369c6f889e..025b133297a6 100644 --- a/drivers/misc/lkdtm/stackleak.c +++ b/drivers/misc/lkdtm/stackleak.c @@ -115,7 +115,7 @@ out: } } -void lkdtm_STACKLEAK_ERASING(void) +static void lkdtm_STACKLEAK_ERASING(void) { unsigned long flags; @@ -124,7 +124,7 @@ void lkdtm_STACKLEAK_ERASING(void) local_irq_restore(flags); } #else /* 
defined(CONFIG_GCC_PLUGIN_STACKLEAK) */ -void lkdtm_STACKLEAK_ERASING(void) +static void lkdtm_STACKLEAK_ERASING(void) { if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) { pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n"); @@ -133,3 +133,12 @@ void lkdtm_STACKLEAK_ERASING(void) } } #endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */ + +static struct crashtype crashtypes[] = { + CRASHTYPE(STACKLEAK_ERASING), +}; + +struct crashtype_category stackleak_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c index 9161ce7ed47a..6215ec995cd3 100644 --- a/drivers/misc/lkdtm/usercopy.c +++ b/drivers/misc/lkdtm/usercopy.c @@ -5,6 +5,7 @@ */ #include "lkdtm.h" #include <linux/slab.h> +#include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/sched/task_stack.h> #include <linux/mman.h> @@ -30,12 +31,12 @@ static const unsigned char test_text[] = "This is a test.\n"; */ static noinline unsigned char *trick_compiler(unsigned char *stack) { - return stack + 0; + return stack + unconst; } static noinline unsigned char *do_usercopy_stack_callee(int value) { - unsigned char buf[32]; + unsigned char buf[128]; int i; /* Exercise stack to avoid everything living in registers. */ @@ -43,7 +44,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value) buf[i] = value & 0xff; } - return trick_compiler(buf); + /* + * Put the target buffer in the middle of stack allocation + * so that we don't step on future stack users regardless + * of stack growth direction. + */ + return trick_compiler(&buf[(128/2)-32]); } static noinline void do_usercopy_stack(bool to_user, bool bad_frame) @@ -66,6 +72,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame) bad_stack -= sizeof(unsigned long); } +#ifdef ARCH_HAS_CURRENT_STACK_POINTER + pr_info("stack : %px\n", (void *)current_stack_pointer); +#endif + pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack)); + pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack)); + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, 0); @@ -119,7 +131,7 @@ free_user: * This checks for whole-object size validation with hardened usercopy, * with or without usercopy whitelisting. */ -static void do_usercopy_heap_size(bool to_user) +static void do_usercopy_slab_size(bool to_user) { unsigned long user_addr; unsigned char *one, *two; @@ -185,9 +197,9 @@ free_kernel: /* * This checks for the specific whitelist window within an object. If this - * test passes, then do_usercopy_heap_size() tests will pass too. + * test passes, then do_usercopy_slab_size() tests will pass too. */ -static void do_usercopy_heap_whitelist(bool to_user) +static void do_usercopy_slab_whitelist(bool to_user) { unsigned long user_alloc; unsigned char *buf = NULL; @@ -261,42 +273,42 @@ free_alloc: } /* Callable tests. 
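/*
 * Aside: the trick_compiler()/OPTIMIZER_HIDE_VAR() pattern seen in the
 * usercopy and heap diffs above exists so the compiler cannot prove where
 * a pointer points and quietly drop or hoist the deliberately bad access.
 * An empty asm with the variable as an in/out operand severs that
 * knowledge; the kernel macro is essentially this (GCC/Clang only, sketch):
 */
#define HIDE_VAR(var) __asm__ volatile("" : "+r"(var))

static char *launder(char *p)
{
	HIDE_VAR(p);	/* from here on, p has unknown provenance */
	return p;
}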
*/ -void lkdtm_USERCOPY_HEAP_SIZE_TO(void) +static void lkdtm_USERCOPY_SLAB_SIZE_TO(void) { - do_usercopy_heap_size(true); + do_usercopy_slab_size(true); } -void lkdtm_USERCOPY_HEAP_SIZE_FROM(void) +static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void) { - do_usercopy_heap_size(false); + do_usercopy_slab_size(false); } -void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void) +static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void) { - do_usercopy_heap_whitelist(true); + do_usercopy_slab_whitelist(true); } -void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void) +static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void) { - do_usercopy_heap_whitelist(false); + do_usercopy_slab_whitelist(false); } -void lkdtm_USERCOPY_STACK_FRAME_TO(void) +static void lkdtm_USERCOPY_STACK_FRAME_TO(void) { do_usercopy_stack(true, true); } -void lkdtm_USERCOPY_STACK_FRAME_FROM(void) +static void lkdtm_USERCOPY_STACK_FRAME_FROM(void) { do_usercopy_stack(false, true); } -void lkdtm_USERCOPY_STACK_BEYOND(void) +static void lkdtm_USERCOPY_STACK_BEYOND(void) { do_usercopy_stack(true, false); } -void lkdtm_USERCOPY_KERNEL(void) +static void lkdtm_USERCOPY_KERNEL(void) { unsigned long user_addr; @@ -330,6 +342,86 @@ free_user: vm_munmap(user_addr, PAGE_SIZE); } +/* + * This expects "kaddr" to point to a PAGE_SIZE allocation, which means + * a more complete test that would include copy_from_user() would risk + * memory corruption. Just test copy_to_user() here, as that exercises + * almost exactly the same code paths. + */ +static void do_usercopy_page_span(const char *name, void *kaddr) +{ + unsigned long uaddr; + + uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (uaddr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + + /* Initialize contents. */ + memset(kaddr, 0xAA, PAGE_SIZE); + + /* Bump the kaddr forward to detect a page-spanning overflow. */ + kaddr += PAGE_SIZE / 2; + + pr_info("attempting good copy_to_user() from kernel %s: %px\n", + name, kaddr); + if (copy_to_user((void __user *)uaddr, kaddr, + unconst + (PAGE_SIZE / 2))) { + pr_err("copy_to_user() failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user() from kernel %s: %px\n", + name, kaddr); + if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) { + pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n"); + goto free_user; + } + + pr_err("FAIL: bad copy_to_user() not detected!\n"); + pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy"); + +free_user: + vm_munmap(uaddr, PAGE_SIZE); +} + +static void lkdtm_USERCOPY_VMALLOC(void) +{ + void *addr; + + addr = vmalloc(PAGE_SIZE); + if (!addr) { + pr_err("vmalloc() failed!?\n"); + return; + } + do_usercopy_page_span("vmalloc", addr); + vfree(addr); +} + +static void lkdtm_USERCOPY_FOLIO(void) +{ + struct folio *folio; + void *addr; + + /* + * FIXME: Folio checking currently misses 0-order allocations, so + * allocate and bump forward to the last page. + */ + folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1); + if (!folio) { + pr_err("folio_alloc() failed!?\n"); + return; + } + addr = folio_address(folio); + if (addr) + do_usercopy_page_span("folio", addr + PAGE_SIZE); + else + pr_err("folio_address() failed?!\n"); + folio_put(folio); +} + void __init lkdtm_usercopy_init(void) { /* Prepare cache that lacks SLAB_USERCOPY flag. 
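/*
 * Aside: the do_usercopy_page_span() probe above in concrete numbers. With
 * 4096-byte pages the kernel pointer is bumped to kaddr + 2048; the "good"
 * 2048-byte copy ends exactly at the page boundary, while the "bad"
 * 4096-byte copy runs 2048 bytes into the following page, which
 * CONFIG_HARDENED_USERCOPY is expected to reject. A sketch of the boundary
 * test (illustrative, not the hardened-usercopy code itself):
 */
static int spans_page(unsigned long addr, unsigned long len,
		      unsigned long page_size)
{
	/* true when [addr, addr + len) crosses a page boundary */
	return (addr / page_size) != ((addr + len - 1) / page_size);
}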
*/ @@ -345,3 +437,21 @@ void __exit lkdtm_usercopy_exit(void) { kmem_cache_destroy(whitelist_cache); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(USERCOPY_SLAB_SIZE_TO), + CRASHTYPE(USERCOPY_SLAB_SIZE_FROM), + CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO), + CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM), + CRASHTYPE(USERCOPY_STACK_FRAME_TO), + CRASHTYPE(USERCOPY_STACK_FRAME_FROM), + CRASHTYPE(USERCOPY_STACK_BEYOND), + CRASHTYPE(USERCOPY_VMALLOC), + CRASHTYPE(USERCOPY_FOLIO), + CRASHTYPE(USERCOPY_KERNEL), +}; + +struct crashtype_category usercopy_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c index 4b8f1c7d726d..049a12006348 100644 --- a/drivers/misc/pvpanic/pvpanic.c +++ b/drivers/misc/pvpanic/pvpanic.c @@ -34,7 +34,9 @@ pvpanic_send_event(unsigned int event) { struct pvpanic_instance *pi_cur; - spin_lock(&pvpanic_lock); + if (!spin_trylock(&pvpanic_lock)) + return; + list_for_each_entry(pi_cur, &pvpanic_list, list) { if (event & pi_cur->capability & pi_cur->events) iowrite8(event, pi_cur->base); @@ -55,9 +57,13 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused return NOTIFY_DONE; } +/* + * Call our notifier very early on panic, deferring the + * action taken to the hypervisor. + */ static struct notifier_block pvpanic_panic_nb = { .notifier_call = pvpanic_panic_notify, - .priority = 1, /* let this called before broken drm_fb_helper() */ + .priority = INT_MAX, }; static void pvpanic_remove(void *param) diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index f1d8ba6d4857..086ce77d9074 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1452,10 +1452,10 @@ static void vmballoon_reset(struct vmballoon *b) error = vmballoon_vmci_init(b); if (error) - pr_err("failed to initialize vmci doorbell\n"); + pr_err_once("failed to initialize vmci doorbell\n"); if (vmballoon_send_guest_id(b)) - pr_err("failed to send guest ID to the host\n"); + pr_err_once("failed to send guest ID to the host\n"); unlock: up_write(&b->conf_sem); diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 605794aadf11..b6d4d7fd686a 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig @@ -5,7 +5,7 @@ config VMWARE_VMCI tristate "VMware VMCI Driver" - depends on X86 && PCI + depends on (X86 || ARM64) && !CPU_BIG_ENDIAN && PCI help This is VMware's Virtual Machine Communication Interface. 
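Aside: the pvpanic spin_trylock() change above is the standard pattern for
taking a lock on the panic path, where the current holder may never run
again; blocking would hang the panic instead of reporting it. A minimal
illustrative shape, not the driver's exact code:

static void panic_notify_sketch(spinlock_t *lock)
{
	if (!spin_trylock(lock))
		return;	/* holder is wedged or dead: give up, don't hang */
	/* ... walk the instance list and poke the device ... */
	spin_unlock(lock);
}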
It enables high-speed communication between host and guest in a virtual diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 6cf3e21c7604..172696abce31 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -665,9 +665,8 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid) int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid) { struct vmci_ctx *context; - struct vmci_handle_list *notifier, *tmp; + struct vmci_handle_list *notifier = NULL, *iter, *tmp; struct vmci_handle handle; - bool found = false; context = vmci_ctx_get(context_id); if (!context) @@ -676,23 +675,23 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid) handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER); spin_lock(&context->lock); - list_for_each_entry_safe(notifier, tmp, + list_for_each_entry_safe(iter, tmp, &context->notifier_list, node) { - if (vmci_handle_is_equal(notifier->handle, handle)) { - list_del_rcu(¬ifier->node); + if (vmci_handle_is_equal(iter->handle, handle)) { + list_del_rcu(&iter->node); context->n_notifiers--; - found = true; + notifier = iter; break; } } spin_unlock(&context->lock); - if (found) + if (notifier) kvfree_rcu(notifier); vmci_ctx_put(context); - return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND; + return notifier ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND; } static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context, diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 57a6157209a1..aa7b05de97dd 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -614,6 +614,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, } if (!mmio_base) { + if (IS_ENABLED(CONFIG_ARM64)) { + dev_err(&pdev->dev, "MMIO base is invalid\n"); + return -ENXIO; + } error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME); if (error) { dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 94ebf7f3fd58..8f2de1893245 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -2577,6 +2577,12 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, if (result < VMCI_SUCCESS) return result; + /* + * This virt_wmb() ensures that data written to the queue + * is observable before the new producer_tail is. + */ + virt_wmb(); + vmci_q_header_add_producer_tail(produce_q->q_header, written, produce_q_size); return written; @@ -2620,6 +2626,12 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, if (buf_ready < VMCI_SUCCESS) return (ssize_t) buf_ready; + /* + * This virt_rmb() ensures that data from the queue will be read + * after we have determined how much is ready to be consumed. + */ + virt_rmb(); + read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); head = vmci_q_header_consumer_head(produce_q->q_header); if (likely(head + read < consume_q_size)) { diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 555aa77a574d..967d0084800e 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -304,6 +304,7 @@ config NVMEM_LAYERSCAPE_SFP tristate "Layerscape SFP (Security Fuse Processor) support" depends on ARCH_LAYERSCAPE || COMPILE_TEST depends on HAS_IOMEM + select REGMAP_MMIO help This driver provides support to read the eFuses on Freescale Layerscape SoC's. 
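Aside: the virt_wmb()/virt_rmb() additions in the VMCI queue-pair code
above are the classic ring-buffer publish protocol: the producer makes the
payload visible before publishing the new tail index, and the consumer
orders its payload reads after reading that index. A hedged userspace
sketch with C11 release/acquire standing in for the kernel barriers:

#include <stdatomic.h>
#include <stddef.h>

struct ring {
	char buf[4096];
	atomic_size_t tail;	/* producer-owned publish index */
};

static void produce(struct ring *r, size_t pos, char byte)
{
	r->buf[pos] = byte;				/* payload first */
	atomic_store_explicit(&r->tail, pos + 1,
			      memory_order_release);	/* then the index */
}

static int consume(struct ring *r, size_t head, char *out)
{
	size_t tail = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (head == tail)
		return 0;		/* nothing published yet */
	*out = r->buf[head];		/* ordered after the tail load */
	return 1;
}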
For example, the vendor provides a per part @@ -324,4 +325,16 @@ config NVMEM_SUNPLUS_OCOTP This driver can also be built as a module. If so, the module will be called nvmem-sunplus-ocotp. +config NVMEM_APPLE_EFUSES + tristate "Apple eFuse support" + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + help + Say y here to enable support for reading eFuses on Apple SoCs + such as the M1. These are e.g. used to store factory programmed + calibration data required for the PCIe or the USB-C PHY. + + This driver can also be built as a module. If so, the module will + be called nvmem-apple-efuses. + endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 891958e29d25..00e136a0a123 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -65,3 +65,5 @@ obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o nvmem-layerscape-sfp-y := layerscape-sfp.o obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o nvmem_sunplus_ocotp-y := sunplus-ocotp.o +obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o +nvmem-apple-efuses-y := apple-efuses.o diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c new file mode 100644 index 000000000000..9b7c87102104 --- /dev/null +++ b/drivers/nvmem/apple-efuses.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Apple SoC eFuse driver + * + * Copyright (C) The Asahi Linux Contributors + */ + +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct apple_efuses_priv { + void __iomem *fuses; +}; + +static int apple_efuses_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct apple_efuses_priv *priv = context; + u32 *dst = val; + + while (bytes >= sizeof(u32)) { + *dst++ = readl_relaxed(priv->fuses + offset); + bytes -= sizeof(u32); + offset += sizeof(u32); + } + + return 0; +} + +static int apple_efuses_probe(struct platform_device *pdev) +{ + struct apple_efuses_priv *priv; + struct resource *res; + struct nvmem_config config = { + .dev = &pdev->dev, + .read_only = true, + .reg_read = apple_efuses_read, + .stride = sizeof(u32), + .word_size = sizeof(u32), + .name = "apple_efuses_nvmem", + .id = NVMEM_DEVID_AUTO, + .root_only = true, + }; + + priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->fuses)) + return PTR_ERR(priv->fuses); + + config.priv = priv; + config.size = resource_size(res); + + return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config)); +} + +static const struct of_device_id apple_efuses_of_match[] = { + { .compatible = "apple,efuses", }, + {} +}; + +MODULE_DEVICE_TABLE(of, apple_efuses_of_match); + +static struct platform_driver apple_efuses_driver = { + .driver = { + .name = "apple_efuses", + .of_match_table = apple_efuses_of_match, + }, + .probe = apple_efuses_probe, +}; + +module_platform_driver(apple_efuses_driver); + +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c index a8097511582a..dfea96c52463 100644 --- a/drivers/nvmem/bcm-ocotp.c +++ b/drivers/nvmem/bcm-ocotp.c @@ -244,7 +244,7 @@ static const struct of_device_id bcm_otpc_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); -static const struct acpi_device_id bcm_otpc_acpi_ids[] = { +static const struct acpi_device_id bcm_otpc_acpi_ids[] 
__maybe_unused = { { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map }, { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 }, { /* sentinel */ } diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c index 439f00b9eef6..450b927691c3 100644 --- a/drivers/nvmem/brcm_nvram.c +++ b/drivers/nvmem/brcm_nvram.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/nvmem-provider.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -72,6 +73,7 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data, return -ENOMEM; priv->cells[idx].offset = value - (char *)data; priv->cells[idx].bytes = strlen(value); + priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); } return 0; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index f58d9bc7aa08..1e3c754efd0d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -467,6 +467,7 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, cell->bit_offset = info->bit_offset; cell->nbits = info->nbits; + cell->np = info->np; if (cell->nbits) cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, diff --git a/drivers/nvmem/layerscape-sfp.c b/drivers/nvmem/layerscape-sfp.c index e591c1511e33..e2b424561949 100644 --- a/drivers/nvmem/layerscape-sfp.c +++ b/drivers/nvmem/layerscape-sfp.c @@ -13,15 +13,17 @@ #include <linux/nvmem-provider.h> #include <linux/platform_device.h> #include <linux/property.h> +#include <linux/regmap.h> #define LAYERSCAPE_SFP_OTP_OFFSET 0x0200 struct layerscape_sfp_priv { - void __iomem *base; + struct regmap *regmap; }; struct layerscape_sfp_data { int size; + enum regmap_endian endian; }; static int layerscape_sfp_read(void *context, unsigned int offset, void *val, @@ -29,15 +31,16 @@ static int layerscape_sfp_read(void *context, unsigned int offset, void *val, { struct layerscape_sfp_priv *priv = context; - memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset, - bytes); - - return 0; + return regmap_bulk_read(priv->regmap, + LAYERSCAPE_SFP_OTP_OFFSET + offset, val, + bytes / 4); } static struct nvmem_config layerscape_sfp_nvmem_config = { .name = "fsl-sfp", .reg_read = layerscape_sfp_read, + .word_size = 4, + .stride = 4, }; static int layerscape_sfp_probe(struct platform_device *pdev) @@ -45,16 +48,26 @@ static int layerscape_sfp_probe(struct platform_device *pdev) const struct layerscape_sfp_data *data; struct layerscape_sfp_priv *priv; struct nvmem_device *nvmem; + struct regmap_config config = { 0 }; + void __iomem *base; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - priv->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); data = device_get_match_data(&pdev->dev); + config.reg_bits = 32; + config.reg_stride = 4; + config.val_bits = 32; + config.val_format_endian = data->endian; + config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4; + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); layerscape_sfp_nvmem_config.size = data->size; layerscape_sfp_nvmem_config.dev = &pdev->dev; @@ -65,11 +78,18 @@ static int layerscape_sfp_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(nvmem); } +static const struct layerscape_sfp_data ls1021a_data = { + .size = 
0x88, + .endian = REGMAP_ENDIAN_BIG, +}; + static const struct layerscape_sfp_data ls1028a_data = { .size = 0x88, + .endian = REGMAP_ENDIAN_LITTLE, }; static const struct of_device_id layerscape_sfp_dt_ids[] = { + { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data }, { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data }, {}, }; diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 162132c7dab9..c1e893c8a247 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -217,9 +217,8 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv, goto err_clk_rate_set; } - ret = pm_runtime_get_sync(priv->dev); + ret = pm_runtime_resume_and_get(priv->dev); if (ret < 0) { - pm_runtime_put_noidle(priv->dev); dev_err(priv->dev, "Failed to enable power-domain\n"); goto err_reg_enable; } diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c index 2dc59c22eb55..52b928a7a6d5 100644 --- a/drivers/nvmem/sunplus-ocotp.c +++ b/drivers/nvmem/sunplus-ocotp.c @@ -71,7 +71,7 @@ struct sp_ocotp_data { int size; }; -const struct sp_ocotp_data sp_otp_v0 = { +static const struct sp_ocotp_data sp_otp_v0 = { .size = QAC628_OTP_SIZE, }; @@ -202,8 +202,6 @@ static int sp_ocotp_probe(struct platform_device *pdev) (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK, (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE); - dev_info(dev, "by Sunplus (C) 2020"); - return 0; } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 82b63e60c5a2..300b0f2b5f84 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -64,6 +64,7 @@ config USB_LGM_PHY config PHY_CAN_TRANSCEIVER tristate "CAN transceiver PHY" select GENERIC_PHY + select MULTIPLEXER help This option enables support for CAN transceivers as a PHY. This driver provides function for putting the transceivers in various diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c index f0bc87d654d4..3900f1650851 100644 --- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c +++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c @@ -24,6 +24,14 @@ #define SUN6I_DPHY_TX_CTL_REG 0x04 #define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT BIT(28) +#define SUN6I_DPHY_RX_CTL_REG 0x08 +#define SUN6I_DPHY_RX_CTL_EN_DBC BIT(31) +#define SUN6I_DPHY_RX_CTL_RX_CLK_FORCE BIT(24) +#define SUN6I_DPHY_RX_CTL_RX_D3_FORCE BIT(23) +#define SUN6I_DPHY_RX_CTL_RX_D2_FORCE BIT(22) +#define SUN6I_DPHY_RX_CTL_RX_D1_FORCE BIT(21) +#define SUN6I_DPHY_RX_CTL_RX_D0_FORCE BIT(20) + #define SUN6I_DPHY_TX_TIME0_REG 0x10 #define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n) (((n) & 0xff) << 24) #define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n) (((n) & 0xff) << 16) @@ -44,12 +52,29 @@ #define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n) (((n) & 0xff) << 8) #define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n) ((n) & 0xff) +#define SUN6I_DPHY_RX_TIME0_REG 0x30 +#define SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(n) (((n) & 0xff) << 24) +#define SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(n) (((n) & 0xff) << 16) +#define SUN6I_DPHY_RX_TIME0_LP_RX(n) (((n) & 0xff) << 8) + +#define SUN6I_DPHY_RX_TIME1_REG 0x34 +#define SUN6I_DPHY_RX_TIME1_RX_DLY(n) (((n) & 0xfff) << 20) +#define SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(n) ((n) & 0xfffff) + +#define SUN6I_DPHY_RX_TIME2_REG 0x38 +#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA1(n) (((n) & 0xff) << 8) +#define SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(n) ((n) & 0xff) + +#define SUN6I_DPHY_RX_TIME3_REG 0x40 +#define SUN6I_DPHY_RX_TIME3_LPRST_DLY(n) (((n) & 0xffff) << 16) + #define SUN6I_DPHY_ANA0_REG 0x4c #define SUN6I_DPHY_ANA0_REG_PWS BIT(31) #define SUN6I_DPHY_ANA0_REG_DMPC 
BIT(28) #define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) & 0xf) << 24) #define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12) #define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8) +#define SUN6I_DPHY_ANA0_REG_SFB(n) (((n) & 3) << 2) #define SUN6I_DPHY_ANA1_REG 0x50 #define SUN6I_DPHY_ANA1_REG_VTTMODE BIT(31) @@ -84,6 +109,11 @@ #define SUN6I_DPHY_DBG5_REG 0xf4 +enum sun6i_dphy_direction { + SUN6I_DPHY_DIRECTION_TX, + SUN6I_DPHY_DIRECTION_RX, +}; + struct sun6i_dphy { struct clk *bus_clk; struct clk *mod_clk; @@ -92,6 +122,8 @@ struct sun6i_dphy { struct phy *phy; struct phy_configure_opts_mipi_dphy config; + + enum sun6i_dphy_direction direction; }; static int sun6i_dphy_init(struct phy *phy) @@ -119,9 +151,8 @@ static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts) return 0; } -static int sun6i_dphy_power_on(struct phy *phy) +static int sun6i_dphy_tx_power_on(struct sun6i_dphy *dphy) { - struct sun6i_dphy *dphy = phy_get_drvdata(phy); u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0); regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG, @@ -211,12 +242,129 @@ static int sun6i_dphy_power_on(struct phy *phy) return 0; } +static int sun6i_dphy_rx_power_on(struct sun6i_dphy *dphy) +{ + /* Physical clock rate is actually half of symbol rate with DDR. */ + unsigned long mipi_symbol_rate = dphy->config.hs_clk_rate; + unsigned long dphy_clk_rate; + unsigned int rx_dly; + unsigned int lprst_dly; + u32 value; + + dphy_clk_rate = clk_get_rate(dphy->mod_clk); + if (!dphy_clk_rate) + return -EINVAL; + + /* Hardcoded timing parameters from the Allwinner BSP. */ + regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME0_REG, + SUN6I_DPHY_RX_TIME0_HS_RX_SYNC(255) | + SUN6I_DPHY_RX_TIME0_HS_RX_CLK_MISS(255) | + SUN6I_DPHY_RX_TIME0_LP_RX(255)); + + /* + * Formula from the Allwinner BSP, with hardcoded coefficients + * (probably internal divider/multiplier). + */ + rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8)); + + /* + * The Allwinner BSP has an alternative formula for LP_RX_ULPS_WP: + * lp_ulps_wp_cnt = lp_ulps_wp_ms * lp_clk / 1000 + * but does not use it and hardcodes 255 instead. + */ + regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME1_REG, + SUN6I_DPHY_RX_TIME1_RX_DLY(rx_dly) | + SUN6I_DPHY_RX_TIME1_LP_RX_ULPS_WP(255)); + + /* HS_RX_ANA0 value is hardcoded in the Allwinner BSP. */ + regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME2_REG, + SUN6I_DPHY_RX_TIME2_HS_RX_ANA0(4)); + + /* + * Formula from the Allwinner BSP, with hardcoded coefficients + * (probably internal divider/multiplier). + */ + lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2)); + + regmap_write(dphy->regs, SUN6I_DPHY_RX_TIME3_REG, + SUN6I_DPHY_RX_TIME3_LPRST_DLY(lprst_dly)); + + /* Analog parameters are hardcoded in the Allwinner BSP. */ + regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, + SUN6I_DPHY_ANA0_REG_PWS | + SUN6I_DPHY_ANA0_REG_SLV(7) | + SUN6I_DPHY_ANA0_REG_SFB(2)); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG, + SUN6I_DPHY_ANA1_REG_SVTT(4)); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, + SUN6I_DPHY_ANA4_REG_DMPLVC | + SUN6I_DPHY_ANA4_REG_DMPLVD(1)); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG, + SUN6I_DPHY_ANA2_REG_ENIB); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, + SUN6I_DPHY_ANA3_EN_LDOR | + SUN6I_DPHY_ANA3_EN_LDOC | + SUN6I_DPHY_ANA3_EN_LDOD); + + /* + * Delay comes from the Allwinner BSP, likely for internal regulator + * ramp-up. 
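/*
 * Aside: the two BSP-derived timing formulas above, worked with assumed
 * rates. For dphy_clk_rate = 150 MHz and a 300 Mbps symbol rate, integer
 * division gives rx_dly = 8 * (150000000 / 37500000) = 32 and
 * lprst_dly = 4 * (150000000 / 150000000) = 4. Sketch:
 */
static void sun6i_rx_timings_sketch(unsigned long dphy_clk_rate,
				    unsigned long mipi_symbol_rate,
				    unsigned int *rx_dly,
				    unsigned int *lprst_dly)
{
	/* coefficients are hardcoded in the Allwinner BSP, as noted above */
	*rx_dly = 8 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 8));
	*lprst_dly = 4 * (unsigned int)(dphy_clk_rate / (mipi_symbol_rate / 2));
}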
+ */ + udelay(3); + + value = SUN6I_DPHY_RX_CTL_EN_DBC | SUN6I_DPHY_RX_CTL_RX_CLK_FORCE; + + /* + * Rx data lane force-enable bits are used as regular RX enable by the + * Allwinner BSP. + */ + if (dphy->config.lanes >= 1) + value |= SUN6I_DPHY_RX_CTL_RX_D0_FORCE; + if (dphy->config.lanes >= 2) + value |= SUN6I_DPHY_RX_CTL_RX_D1_FORCE; + if (dphy->config.lanes >= 3) + value |= SUN6I_DPHY_RX_CTL_RX_D2_FORCE; + if (dphy->config.lanes == 4) + value |= SUN6I_DPHY_RX_CTL_RX_D3_FORCE; + + regmap_write(dphy->regs, SUN6I_DPHY_RX_CTL_REG, value); + + regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, + SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) | + SUN6I_DPHY_GCTL_EN); + + return 0; +} + +static int sun6i_dphy_power_on(struct phy *phy) +{ + struct sun6i_dphy *dphy = phy_get_drvdata(phy); + + switch (dphy->direction) { + case SUN6I_DPHY_DIRECTION_TX: + return sun6i_dphy_tx_power_on(dphy); + case SUN6I_DPHY_DIRECTION_RX: + return sun6i_dphy_rx_power_on(dphy); + default: + return -EINVAL; + } +} + static int sun6i_dphy_power_off(struct phy *phy) { struct sun6i_dphy *dphy = phy_get_drvdata(phy); - regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG, - SUN6I_DPHY_ANA1_REG_VTTMODE, 0); + regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, 0); + + regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, 0); + regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG, 0); + regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG, 0); + regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG, 0); + regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG, 0); return 0; } @@ -253,7 +401,9 @@ static int sun6i_dphy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct sun6i_dphy *dphy; + const char *direction; void __iomem *regs; + int ret; dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); if (!dphy) @@ -290,6 +440,14 @@ static int sun6i_dphy_probe(struct platform_device *pdev) return PTR_ERR(dphy->phy); } + dphy->direction = SUN6I_DPHY_DIRECTION_TX; + + ret = of_property_read_string(pdev->dev.of_node, "allwinner,direction", + &direction); + + if (!ret && !strncmp(direction, "rx", 2)) + dphy->direction = SUN6I_DPHY_DIRECTION_RX; + phy_set_drvdata(dphy->phy, dphy); phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index 6b917f7bddbe..73fb99ccd525 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -83,6 +83,7 @@ #define SIERRA_DFE_BIASTRIM_PREG 0x04C #define SIERRA_DRVCTRL_ATTEN_PREG 0x06A #define SIERRA_DRVCTRL_BOOST_PREG 0x06F +#define SIERRA_TX_RCVDET_OVRD_PREG 0x072 #define SIERRA_CLKPATHCTRL_TMR_PREG 0x081 #define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085 #define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086 @@ -1684,6 +1685,66 @@ static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = { .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs), }; +/* + * TI J721E: + * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc, + * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz + */ +static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_A4_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x8349, 
SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}, + {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} +}; + +static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = { + .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs, + .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs), +}; + /* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */ static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = { {0x000E, SIERRA_CMN_PLLLC_MODE_PREG}, @@ -1765,6 +1826,69 @@ static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = { .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs), }; +/* + * TI J721E: + * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc, + * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz + */ +static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_A4_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, 
SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}, + {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} +}; + +static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = { + .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs, + .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs), +}; + /* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */ static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = { {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, @@ -1840,6 +1964,69 @@ static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = { .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs), }; +/* + * TI J721E: + * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc, + * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz + */ +static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = { + {0xFC08, SIERRA_DET_STANDEC_A_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_A4_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x1555, SIERRA_DFE_BIASTRIM_PREG}, + {0x9703, SIERRA_DRVCTRL_BOOST_PREG}, + {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG}, + {0x9800, SIERRA_RX_CTLE_CAL_PREG}, + {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x0041, SIERRA_DEQ_GLUT0}, + {0x0082, SIERRA_DEQ_GLUT1}, + {0x00C3, SIERRA_DEQ_GLUT2}, + {0x0145, SIERRA_DEQ_GLUT3}, + {0x0186, SIERRA_DEQ_GLUT4}, + {0x09E7, SIERRA_DEQ_ALUT0}, + {0x09A6, SIERRA_DEQ_ALUT1}, + {0x0965, SIERRA_DEQ_ALUT2}, + {0x08E3, SIERRA_DEQ_ALUT3}, + {0x00FA, SIERRA_DEQ_DFETAP0}, + {0x00FA, SIERRA_DEQ_DFETAP1}, + {0x00FA, SIERRA_DEQ_DFETAP2}, + {0x00FA, SIERRA_DEQ_DFETAP3}, + {0x00FA, SIERRA_DEQ_DFETAP4}, + {0x000F, SIERRA_DEQ_PRECUR_PREG}, + {0x0280, SIERRA_DEQ_POSTCUR_PREG}, + {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG}, + {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, + {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x002B, SIERRA_CPI_TRIM_PREG}, + {0x0003, SIERRA_EPI_CTRL_PREG}, + {0x803F, SIERRA_SDFILT_H2L_A_PREG}, + {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}, + {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} +}; + +static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = { + .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs, + .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs), +}; + /* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */ static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = { {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, @@ -2299,9 +2486,9 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = { [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals, }, 
[TYPE_QSGMII] = { - [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals, - [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals, - [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals, + [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals, + [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals, }, }, [TYPE_USB] = { diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c index a95572b397ca..e625b32889bf 100644 --- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c +++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c @@ -4,17 +4,33 @@ * Copyright 2019 Purism SPC */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> +#include <linux/firmware/imx/ipc.h> +#include <linux/firmware/imx/svc/misc.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <dt-bindings/firmware/imx/rsrc.h> + +/* Control and Status Registers(CSR) */ +#define PHY_CTRL 0x00 +#define CCM_MASK GENMASK(7, 5) +#define CCM(n) FIELD_PREP(CCM_MASK, (n)) +#define CCM_1_2V 0x5 +#define CA_MASK GENMASK(4, 2) +#define CA_3_51MA 0x4 +#define CA(n) FIELD_PREP(CA_MASK, (n)) +#define RFB BIT(1) +#define LVDS_EN BIT(0) /* DPHY registers */ #define DPHY_PD_DPHY 0x00 @@ -55,8 +71,15 @@ #define PWR_ON 0 #define PWR_OFF 1 +#define MIN_VCO_FREQ 640000000 +#define MAX_VCO_FREQ 1500000000 + +#define MIN_LVDS_REFCLK_FREQ 24000000 +#define MAX_LVDS_REFCLK_FREQ 150000000 + enum mixel_dphy_devtype { MIXEL_IMX8MQ, + MIXEL_IMX8QXP, }; struct mixel_dphy_devdata { @@ -65,6 +88,7 @@ struct mixel_dphy_devdata { u8 reg_rxlprp; u8 reg_rxcdrp; u8 reg_rxhs_settle; + bool is_combo; /* MIPI DPHY and LVDS PHY combo */ }; static const struct mixel_dphy_devdata mixel_dphy_devdata[] = { @@ -74,6 +98,10 @@ static const struct mixel_dphy_devdata mixel_dphy_devdata[] = { .reg_rxlprp = 0x40, .reg_rxcdrp = 0x44, .reg_rxhs_settle = 0x48, + .is_combo = false, + }, + [MIXEL_IMX8QXP] = { + .is_combo = true, }, }; @@ -95,8 +123,12 @@ struct mixel_dphy_cfg { struct mixel_dphy_priv { struct mixel_dphy_cfg cfg; struct regmap *regmap; + struct regmap *lvds_regmap; struct clk *phy_ref_clk; const struct mixel_dphy_devdata *devdata; + struct imx_sc_ipc *ipc_handle; + bool is_slave; + int id; }; static const struct regmap_config mixel_dphy_regmap_config = { @@ -317,7 +349,8 @@ static int mixel_dphy_set_pll_params(struct phy *phy) return 0; } -static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +static int +mixel_dphy_configure_mipi_dphy(struct phy *phy, union phy_configure_opts *opts) { struct mixel_dphy_priv *priv = phy_get_drvdata(phy); struct mixel_dphy_cfg cfg = { 0 }; @@ -345,15 +378,126 @@ static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts) return 0; } +static int +mixel_dphy_configure_lvds_phy(struct phy *phy, union phy_configure_opts *opts) +{ + struct mixel_dphy_priv *priv = phy_get_drvdata(phy); + struct phy_configure_opts_lvds *lvds_opts = &opts->lvds; + unsigned long data_rate; + unsigned long fvco; + u32 rsc; + u32 co; + int ret; + + priv->is_slave = lvds_opts->is_slave; + + /* LVDS interface pins */ + regmap_write(priv->lvds_regmap, PHY_CTRL, + CCM(CCM_1_2V) | CA(CA_3_51MA) | RFB); + + /* enable MODE8 only for slave LVDS PHY */ + rsc = priv->id ? 
IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0; + ret = imx_sc_misc_set_control(priv->ipc_handle, rsc, IMX_SC_C_DUAL_MODE, + lvds_opts->is_slave); + if (ret) { + dev_err(&phy->dev, "Failed to configure MODE8: %d\n", ret); + return ret; + } + + /* + * Choose an appropriate divider ratio to meet the requirement of + * PLL VCO frequency range. + * + * ----- 640MHz ~ 1500MHz ------------ --------------- + * | VCO | ----------------> | CO divider | -> | LVDS data rate| + * ----- FVCO ------------ --------------- + * 1/2/4/8 div 7 * differential_clk_rate + */ + data_rate = 7 * lvds_opts->differential_clk_rate; + for (co = 1; co <= 8; co *= 2) { + fvco = data_rate * co; + + if (fvco >= MIN_VCO_FREQ) + break; + } + + if (fvco < MIN_VCO_FREQ || fvco > MAX_VCO_FREQ) { + dev_err(&phy->dev, "VCO frequency %lu is out of range\n", fvco); + return -ERANGE; + } + + /* + * CO is configurable, while CN and CM are not, + * as fixed ratios 1 and 7 are applied respectively. + */ + phy_write(phy, __ffs(co), DPHY_CO); + + /* set reference clock rate */ + clk_set_rate(priv->phy_ref_clk, lvds_opts->differential_clk_rate); + + return ret; +} + +static int mixel_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + if (!opts) { + dev_err(&phy->dev, "No configuration options\n"); + return -EINVAL; + } + + if (phy->attrs.mode == PHY_MODE_MIPI_DPHY) + return mixel_dphy_configure_mipi_dphy(phy, opts); + else if (phy->attrs.mode == PHY_MODE_LVDS) + return mixel_dphy_configure_lvds_phy(phy, opts); + + dev_err(&phy->dev, + "Failed to configure PHY with invalid PHY mode: %d\n", phy->attrs.mode); + + return -EINVAL; +} + +static int +mixel_dphy_validate_lvds_phy(struct phy *phy, union phy_configure_opts *opts) +{ + struct phy_configure_opts_lvds *lvds_cfg = &opts->lvds; + + if (lvds_cfg->bits_per_lane_and_dclk_cycle != 7) { + dev_err(&phy->dev, "Invalid bits per LVDS data lane: %u\n", + lvds_cfg->bits_per_lane_and_dclk_cycle); + return -EINVAL; + } + + if (lvds_cfg->lanes != 4) { + dev_err(&phy->dev, "Invalid LVDS data lanes: %u\n", lvds_cfg->lanes); + return -EINVAL; + } + + if (lvds_cfg->differential_clk_rate < MIN_LVDS_REFCLK_FREQ || + lvds_cfg->differential_clk_rate > MAX_LVDS_REFCLK_FREQ) { + dev_err(&phy->dev, + "Invalid LVDS differential clock rate: %lu\n", + lvds_cfg->differential_clk_rate); + return -EINVAL; + } + + return 0; +} + static int mixel_dphy_validate(struct phy *phy, enum phy_mode mode, int submode, union phy_configure_opts *opts) { - struct mixel_dphy_cfg cfg = { 0 }; + if (mode == PHY_MODE_MIPI_DPHY) { + struct mixel_dphy_cfg mipi_dphy_cfg = { 0 }; - if (mode != PHY_MODE_MIPI_DPHY) - return -EINVAL; + return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy, + &mipi_dphy_cfg); + } else if (mode == PHY_MODE_LVDS) { + return mixel_dphy_validate_lvds_phy(phy, opts); + } - return mixel_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg); + dev_err(&phy->dev, + "Failed to validate PHY with invalid PHY mode: %d\n", mode); + return -EINVAL; } static int mixel_dphy_init(struct phy *phy) @@ -373,27 +517,75 @@ static int mixel_dphy_exit(struct phy *phy) return 0; } -static int mixel_dphy_power_on(struct phy *phy) +static int mixel_dphy_power_on_mipi_dphy(struct phy *phy) { struct mixel_dphy_priv *priv = phy_get_drvdata(phy); u32 locked; int ret; - ret = clk_prepare_enable(priv->phy_ref_clk); - if (ret < 0) - return ret; - phy_write(phy, PWR_ON, DPHY_PD_PLL); ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked, locked, PLL_LOCK_SLEEP, PLL_LOCK_TIMEOUT); if (ret < 0) { dev_err(&phy->dev, "Could not get 
DPHY lock (%d)!\n", ret); - goto clock_disable; + return ret; } phy_write(phy, PWR_ON, DPHY_PD_DPHY); return 0; +} + +static int mixel_dphy_power_on_lvds_phy(struct phy *phy) +{ + struct mixel_dphy_priv *priv = phy_get_drvdata(phy); + u32 locked; + int ret; + + regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, LVDS_EN); + + phy_write(phy, PWR_ON, DPHY_PD_DPHY); + phy_write(phy, PWR_ON, DPHY_PD_PLL); + + /* do not wait for slave LVDS PHY being locked */ + if (priv->is_slave) + return 0; + + ret = regmap_read_poll_timeout(priv->regmap, DPHY_LOCK, locked, + locked, PLL_LOCK_SLEEP, + PLL_LOCK_TIMEOUT); + if (ret < 0) { + dev_err(&phy->dev, "Could not get LVDS PHY lock (%d)!\n", ret); + return ret; + } + + return 0; +} + +static int mixel_dphy_power_on(struct phy *phy) +{ + struct mixel_dphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = clk_prepare_enable(priv->phy_ref_clk); + if (ret < 0) + return ret; + + if (phy->attrs.mode == PHY_MODE_MIPI_DPHY) { + ret = mixel_dphy_power_on_mipi_dphy(phy); + } else if (phy->attrs.mode == PHY_MODE_LVDS) { + ret = mixel_dphy_power_on_lvds_phy(phy); + } else { + dev_err(&phy->dev, + "Failed to power on PHY with invalid PHY mode: %d\n", + phy->attrs.mode); + ret = -EINVAL; + } + + if (ret) + goto clock_disable; + + return 0; clock_disable: clk_disable_unprepare(priv->phy_ref_clk); return ret; @@ -406,16 +598,51 @@ static int mixel_dphy_power_off(struct phy *phy) phy_write(phy, PWR_OFF, DPHY_PD_PLL); phy_write(phy, PWR_OFF, DPHY_PD_DPHY); + if (phy->attrs.mode == PHY_MODE_LVDS) + regmap_update_bits(priv->lvds_regmap, PHY_CTRL, LVDS_EN, 0); + clk_disable_unprepare(priv->phy_ref_clk); return 0; } +static int mixel_dphy_set_mode(struct phy *phy, enum phy_mode mode, int submode) +{ + struct mixel_dphy_priv *priv = phy_get_drvdata(phy); + int ret; + + if (priv->devdata->is_combo && mode != PHY_MODE_LVDS) { + dev_err(&phy->dev, "Failed to set PHY mode for combo PHY\n"); + return -EINVAL; + } + + if (!priv->devdata->is_combo && mode != PHY_MODE_MIPI_DPHY) { + dev_err(&phy->dev, "Failed to set PHY mode to MIPI DPHY\n"); + return -EINVAL; + } + + if (priv->devdata->is_combo) { + u32 rsc = priv->id ? 
IMX_SC_R_MIPI_1 : IMX_SC_R_MIPI_0; + + ret = imx_sc_misc_set_control(priv->ipc_handle, + rsc, IMX_SC_C_MODE, + mode == PHY_MODE_LVDS); + if (ret) { + dev_err(&phy->dev, + "Failed to set PHY mode via SCU ipc: %d\n", ret); + return ret; + } + } + + return 0; +} + static const struct phy_ops mixel_dphy_phy_ops = { .init = mixel_dphy_init, .exit = mixel_dphy_exit, .power_on = mixel_dphy_power_on, .power_off = mixel_dphy_power_off, + .set_mode = mixel_dphy_set_mode, .configure = mixel_dphy_configure, .validate = mixel_dphy_validate, .owner = THIS_MODULE, @@ -424,6 +651,8 @@ static const struct phy_ops mixel_dphy_phy_ops = { static const struct of_device_id mixel_dphy_of_match[] = { { .compatible = "fsl,imx8mq-mipi-dphy", .data = &mixel_dphy_devdata[MIXEL_IMX8MQ] }, + { .compatible = "fsl,imx8qxp-mipi-dphy", + .data = &mixel_dphy_devdata[MIXEL_IMX8QXP] }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mixel_dphy_of_match); @@ -436,6 +665,7 @@ static int mixel_dphy_probe(struct platform_device *pdev) struct mixel_dphy_priv *priv; struct phy *phy; void __iomem *base; + int ret; if (!np) return -ENODEV; @@ -467,6 +697,30 @@ static int mixel_dphy_probe(struct platform_device *pdev) dev_dbg(dev, "phy_ref clock rate: %lu\n", clk_get_rate(priv->phy_ref_clk)); + if (priv->devdata->is_combo) { + priv->lvds_regmap = + syscon_regmap_lookup_by_phandle(np, "fsl,syscon"); + if (IS_ERR(priv->lvds_regmap)) { + ret = PTR_ERR(priv->lvds_regmap); + dev_err_probe(dev, ret, "Failed to get LVDS regmap\n"); + return ret; + } + + priv->id = of_alias_get_id(np, "mipi_dphy"); + if (priv->id < 0) { + dev_err(dev, "Failed to get phy node alias id: %d\n", + priv->id); + return priv->id; + } + + ret = imx_scu_get_handle(&priv->ipc_handle); + if (ret) { + dev_err_probe(dev, ret, + "Failed to get SCU ipc handle\n"); + return ret; + } + } + dev_set_drvdata(dev, priv); phy = devm_phy_create(dev, np, &mixel_dphy_phy_ops); diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c index f1eb03ba25d6..ad7d2edfc414 100644 --- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c +++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c @@ -94,15 +94,21 @@ static int imx8_pcie_phy_init(struct phy *phy) IMX8MM_GPR_PCIE_CMN_RST); usleep_range(200, 500); - if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) { + if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT || + pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) { /* Configure the pad as input */ val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061); writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN, imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061); - } else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) { + } else { /* Configure the PHY to output the refclock via pad */ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN, imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061); + } + + if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT || + pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) { + /* Source clock from SoC internal PLL */ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL, imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062); writel(AUX_PLL_REFCLK_SEL_SYS_PLL, diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c index 5fb4217fb8e0..d4bd419abc3c 100644 --- a/drivers/phy/mediatek/phy-mtk-hdmi.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi.c @@ -120,20 +120,16 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) return PTR_ERR(hdmi_phy->regs); ref_clk = devm_clk_get(dev, "pll_ref"); - if (IS_ERR(ref_clk)) { - ret = PTR_ERR(ref_clk); - dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n", - ret); - 
return ret; - } + if (IS_ERR(ref_clk)) + return dev_err_probe(dev, PTR_ERR(ref_clk), + "Failed to get PLL reference clock\n"); + ref_clk_name = __clk_get_name(ref_clk); ret = of_property_read_string(dev->of_node, "clock-output-names", &clk_init.name); - if (ret < 0) { - dev_err(dev, "Failed to read clock-output-names: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to read clock-output-names\n"); hdmi_phy->dev = dev; hdmi_phy->conf = @@ -141,25 +137,19 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init); hdmi_phy->pll_hw.init = &clk_init; hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw); - if (IS_ERR(hdmi_phy->pll)) { - ret = PTR_ERR(hdmi_phy->pll); - dev_err(dev, "Failed to register PLL: %d\n", ret); - return ret; - } + if (IS_ERR(hdmi_phy->pll)) + return dev_err_probe(dev, PTR_ERR(hdmi_phy->pll), + "Failed to register PLL\n"); ret = of_property_read_u32(dev->of_node, "mediatek,ibias", &hdmi_phy->ibias); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get ibias\n"); ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up", &hdmi_phy->ibias_up); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get ibias_up\n"); dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n"); hdmi_phy->drv_imp_clk = 0x30; @@ -168,17 +158,15 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) hdmi_phy->drv_imp_d0 = 0x30; phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy)); - if (IS_ERR(phy)) { - dev_err(dev, "Failed to create HDMI PHY\n"); - return PTR_ERR(phy); - } + if (IS_ERR(phy)) + return dev_err_probe(dev, PTR_ERR(phy), "Cannot create HDMI PHY\n"); + phy_set_drvdata(phy, hdmi_phy); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - dev_err(dev, "Failed to register HDMI PHY\n"); - return PTR_ERR(phy_provider); - } + if (IS_ERR(phy_provider)) + return dev_err_probe(dev, PTR_ERR(phy_provider), + "Failed to register HDMI PHY\n"); if (hdmi_phy->conf->pll_default_off) hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy); diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c index 67b005d5b9e3..28506932bd91 100644 --- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c +++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c @@ -154,11 +154,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return PTR_ERR(mipi_tx->regs); ref_clk = devm_clk_get(dev, NULL); - if (IS_ERR(ref_clk)) { - ret = PTR_ERR(ref_clk); - dev_err(dev, "Failed to get reference clock: %d\n", ret); - return ret; - } + if (IS_ERR(ref_clk)) + return dev_err_probe(dev, PTR_ERR(ref_clk), + "Failed to get reference clock\n"); ret = of_property_read_u32(dev->of_node, "drive-strength-microamp", &mipi_tx->mipitx_drive); @@ -178,27 +176,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) ret = of_property_read_string(dev->of_node, "clock-output-names", &clk_init.name); - if (ret < 0) { - dev_err(dev, "Failed to read clock-output-names: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to read clock-output-names\n"); clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops; mipi_tx->pll_hw.init = &clk_init; mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw); - if (IS_ERR(mipi_tx->pll)) { - 
ret = PTR_ERR(mipi_tx->pll); - dev_err(dev, "Failed to register PLL: %d\n", ret); - return ret; - } + if (IS_ERR(mipi_tx->pll)) + return dev_err_probe(dev, PTR_ERR(mipi_tx->pll), "Failed to register PLL\n"); phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops); - if (IS_ERR(phy)) { - ret = PTR_ERR(phy); - dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret); - return ret; - } + if (IS_ERR(phy)) + return dev_err_probe(dev, PTR_ERR(phy), "Failed to create MIPI D-PHY\n"); + phy_set_drvdata(phy, mipi_tx); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c index 6f3fe37dee0e..95c6dbb52da7 100644 --- a/drivers/phy/phy-can-transceiver.c +++ b/drivers/phy/phy-can-transceiver.c @@ -10,6 +10,7 @@ #include<linux/module.h> #include<linux/gpio.h> #include<linux/gpio/consumer.h> +#include <linux/mux/consumer.h> struct can_transceiver_data { u32 flags; @@ -21,13 +22,22 @@ struct can_transceiver_phy { struct phy *generic_phy; struct gpio_desc *standby_gpio; struct gpio_desc *enable_gpio; + struct mux_state *mux_state; }; /* Power on function */ static int can_transceiver_phy_power_on(struct phy *phy) { struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy); - + int ret; + + if (can_transceiver_phy->mux_state) { + ret = mux_state_select(can_transceiver_phy->mux_state); + if (ret) { + dev_err(&phy->dev, "Failed to select CAN mux: %d\n", ret); + return ret; + } + } if (can_transceiver_phy->standby_gpio) gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 0); if (can_transceiver_phy->enable_gpio) @@ -45,6 +55,8 @@ static int can_transceiver_phy_power_off(struct phy *phy) gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 1); if (can_transceiver_phy->enable_gpio) gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 0); + if (can_transceiver_phy->mux_state) + mux_state_deselect(can_transceiver_phy->mux_state); return 0; } @@ -95,6 +107,16 @@ static int can_transceiver_phy_probe(struct platform_device *pdev) match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node); drvdata = match->data; + if (of_property_read_bool(dev->of_node, "mux-states")) { + struct mux_state *mux_state; + + mux_state = devm_mux_state_get(dev, NULL); + if (IS_ERR(mux_state)) + return dev_err_probe(&pdev->dev, PTR_ERR(mux_state), + "failed to get mux\n"); + can_transceiver_phy->mux_state = mux_state; + } + phy = devm_phy_create(dev, dev->of_node, &can_transceiver_phy_ops); if (IS_ERR(phy)) { diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 91e28d6ce450..d93ddf1262c5 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -229,6 +229,17 @@ void phy_pm_runtime_forbid(struct phy *phy) } EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid); +/** + * phy_init - phy internal initialization before phy operation + * @phy: the phy returned by phy_get() + * + * Used to allow phy's driver to perform phy internal initialization, + * such as PLL block powering, clock initialization or anything that's + * is required by the phy to perform the start of operation. + * Must be called before phy_power_on(). 
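/*
 * Editor's note on the mediatek conversions above: they lean on
 * dev_err_probe() returning the error it logs, which collapses the
 * four-line "assign, print, return" idiom into one statement and
 * demotes -EPROBE_DEFER to a debug message automatically. A minimal
 * sketch of the resulting pattern, not part of the merged patches;
 * the function name is hypothetical, "pll_ref" is taken from the
 * hunks above.
 */
static int example_probe_step(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "pll_ref");

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get PLL reference clock\n");
	return 0;
}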
+ * + * Return: %0 if successful, a negative error code otherwise + */ int phy_init(struct phy *phy) { int ret; @@ -242,6 +253,9 @@ int phy_init(struct phy *phy) ret = 0; /* Override possible ret == -ENOTSUPP */ mutex_lock(&phy->mutex); + if (phy->power_count > phy->init_count) + dev_warn(&phy->dev, "phy_power_on was called before phy_init\n"); + if (phy->init_count == 0 && phy->ops->init) { ret = phy->ops->init(phy); if (ret < 0) { @@ -258,6 +272,14 @@ out: } EXPORT_SYMBOL_GPL(phy_init); +/** + * phy_exit - Phy internal un-initialization + * @phy: the phy returned by phy_get() + * + * Must be called after phy_power_off(). + * + * Return: %0 if successful, a negative error code otherwise + */ int phy_exit(struct phy *phy) { int ret; @@ -287,6 +309,14 @@ out: } EXPORT_SYMBOL_GPL(phy_exit); +/** + * phy_power_on - Enable the phy and enter proper operation + * @phy: the phy returned by phy_get() + * + * Must be called after phy_init(). + * + * Return: %0 if successful, a negative error code otherwise + */ int phy_power_on(struct phy *phy) { int ret = 0; @@ -329,6 +359,14 @@ out: } EXPORT_SYMBOL_GPL(phy_power_on); +/** + * phy_power_off - Disable the phy. + * @phy: the phy returned by phy_get() + * + * Must be called before phy_exit(). + * + * Return: %0 if successful, a negative error code otherwise + */ int phy_power_off(struct phy *phy) { int ret; @@ -432,7 +470,7 @@ EXPORT_SYMBOL_GPL(phy_reset); * runtime, which are otherwise lost after host controller reset and cannot * be applied in phy_init() or phy_power_on(). * - * Returns: 0 if successful, an negative error code otherwise + * Return: %0 if successful, a negative error code otherwise */ int phy_calibrate(struct phy *phy) { @@ -458,7 +496,7 @@ EXPORT_SYMBOL_GPL(phy_calibrate); * on the phy. The configuration will be applied on the current phy * mode, that can be changed using phy_set_mode(). * - * Returns: 0 if successful, an negative error code otherwise + * Return: %0 if successful, a negative error code otherwise */ int phy_configure(struct phy *phy, union phy_configure_opts *opts) { @@ -492,7 +530,7 @@ EXPORT_SYMBOL_GPL(phy_configure); * PHY, so calling it as many times as deemed fit will have no side * effect. 
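/*
 * Editor's sketch, not from the merged patches: the consumer-side call
 * ordering the new phy-core kernel-doc above spells out (phy_init()
 * before phy_power_on(); phy_power_off() before phy_exit()). "dev" and
 * the "tx" phandle name are placeholders.
 */
static int consumer_bring_up_phy(struct device *dev)
{
	struct phy *phy = devm_phy_get(dev, "tx");
	int ret;

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* PLLs, clocks, internal setup */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* only valid after phy_init() */
	if (ret)
		phy_exit(phy);

	return ret;
	/* teardown mirrors this: phy_power_off(phy), then phy_exit(phy) */
}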
* - * Returns: 0 if successful, an negative error code otherwise + * Return: %0 if successful, a negative error code otherwise */ int phy_validate(struct phy *phy, enum phy_mode mode, int submode, union phy_configure_opts *opts) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index b144ae1f729a..c7309e981bfb 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -2535,6 +2535,50 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00), }; +static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0b), +}; + +static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00), +}; + static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9), QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11), @@ -3177,7 +3221,7 @@ struct qmp_phy_combo_cfg { * @tx2: iomapped memory space for second lane's tx (in dual lane PHYs) * @rx2: iomapped memory space for second lane's rx (in dual lane PHYs) * @pcs_misc: iomapped memory space for lane's pcs_misc - * @pipe_clk: pipe lock + * @pipe_clk: pipe clock * @index: lane index * @qmp: QMP phy to which this lane belongs * @lane_rst: lane's reset controller @@ -4217,6 +4261,35 
@@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = { .pwrdn_delay_max = 1005, /* us */ }; +static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl), + .tx_tbl = sdx65_usb3_uniphy_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_tx_tbl), + .rx_tbl = sdx65_usb3_uniphy_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sdx65_usb3_uniphy_rx_tbl), + .pcs_tbl = sm8350_usb3_uniphy_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_uniphy_pcs_tbl), + .clk_list = qmp_v4_sdx55_usbphy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v4_sdx55_usbphy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8350_usb3_uniphy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .phy_status = PHYSTATUS, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, +}; + static const struct qmp_phy_cfg sm8350_ufsphy_cfg = { .type = PHY_TYPE_UFS, .nlanes = 2, @@ -5012,7 +5085,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_reg_enable; + goto err_unlock; } for (i = 0; i < cfg->num_resets; i++) { @@ -5020,7 +5093,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) if (ret) { dev_err(qmp->dev, "%s reset assert failed\n", cfg->reset_list[i]); - goto err_rst_assert; + goto err_disable_regulators; } } @@ -5029,13 +5102,13 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) if (ret) { dev_err(qmp->dev, "%s reset deassert failed\n", qphy->cfg->reset_list[i]); - goto err_rst; + goto err_assert_reset; } } ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks); if (ret) - goto err_rst; + goto err_assert_reset; if (cfg->has_phy_dp_com_ctrl) { qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, @@ -5077,12 +5150,12 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) return 0; -err_rst: +err_assert_reset: while (++i < cfg->num_resets) reset_control_assert(qmp->resets[i]); -err_rst_assert: +err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_reg_enable: +err_unlock: mutex_unlock(&qmp->phy_mutex); return ret; @@ -5188,14 +5261,14 @@ static int qcom_qmp_phy_power_on(struct phy *phy) if (ret) { dev_err(qmp->dev, "lane%d reset deassert failed\n", qphy->index); - goto err_lane_rst; + return ret; } } ret = clk_prepare_enable(qphy->pipe_clk); if (ret) { dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret); - goto err_clk_enable; + goto err_reset_lane; } /* Tx, Rx, and PCS configurations */ @@ -5246,7 +5319,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy) ret = reset_control_deassert(qmp->ufs_reset); if (ret) - goto err_lane_rst; + goto err_disable_pipe_clk; qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num); @@ -5285,17 +5358,17 @@ static int qcom_qmp_phy_power_on(struct phy *phy) PHY_INIT_COMPLETE_TIMEOUT); if (ret) { dev_err(qmp->dev, "phy initialization timed-out\n"); - goto err_pcs_ready; + goto err_disable_pipe_clk; } } return 0; -err_pcs_ready: +err_disable_pipe_clk: clk_disable_unprepare(qphy->pipe_clk); -err_clk_enable: +err_reset_lane: if (cfg->has_lane_rst) reset_control_assert(qphy->lane_rst); -err_lane_rst: + return ret; 
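/*
 * Editor's note on the qcom-qmp relabelling above: the new goto
 * targets name the cleanup they perform (err_disable_regulators)
 * rather than the step that failed (err_reg_enable), the usual kernel
 * unwind convention. A skeletal sketch with entirely hypothetical
 * helpers, not part of the merged patches:
 */
static int example_com_init(void)
{
	int ret;

	ret = example_enable_regulators();	/* hypothetical helper */
	if (ret)
		goto err_unlock;

	ret = example_assert_resets();		/* hypothetical helper */
	if (ret)
		goto err_disable_regulators;

	return 0;

err_disable_regulators:
	example_disable_regulators();		/* hypothetical helper */
err_unlock:
	example_unlock();			/* hypothetical helper */
	return ret;
}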
} @@ -5514,7 +5587,7 @@ static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg struct reset_control *rst; const char *name = cfg->reset_list[i]; - rst = devm_reset_control_get(dev, name); + rst = devm_reset_control_get_exclusive(dev, name); if (IS_ERR(rst)) { dev_err(dev, "failed to get %s reset\n", name); return PTR_ERR(rst); @@ -5818,6 +5891,11 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = { .owner = THIS_MODULE, }; +static void qcom_qmp_reset_control_put(void *data) +{ + reset_control_put(data); +} + static int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, void __iomem *serdes, const struct qmp_phy_cfg *cfg) @@ -5890,7 +5968,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, * all phys that don't need this. */ snprintf(prop_name, sizeof(prop_name), "pipe%d", id); - qphy->pipe_clk = of_clk_get_by_name(np, prop_name); + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name); if (IS_ERR(qphy->pipe_clk)) { if (cfg->type == PHY_TYPE_PCIE || cfg->type == PHY_TYPE_USB3) { @@ -5907,11 +5985,15 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, /* Get lane reset, if any */ if (cfg->has_lane_rst) { snprintf(prop_name, sizeof(prop_name), "lane%d", id); - qphy->lane_rst = of_reset_control_get(np, prop_name); + qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name); if (IS_ERR(qphy->lane_rst)) { dev_err(dev, "failed to get lane%d reset\n", id); return PTR_ERR(qphy->lane_rst); } + ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put, + qphy->lane_rst); + if (ret) + return ret; } if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE) @@ -6008,6 +6090,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,sm6115-qmp-ufs-phy", .data = &sm6115_ufsphy_cfg, }, { + .compatible = "qcom,sm6350-qmp-ufs-phy", + .data = &sdm845_ufsphy_cfg, + }, { .compatible = "qcom,sm8150-qmp-ufs-phy", .data = &sm8150_ufsphy_cfg, }, { @@ -6047,6 +6132,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,sdx55-qmp-usb3-uni-phy", .data = &sdx55_usb3_uniphy_cfg, }, { + .compatible = "qcom,sdx65-qmp-usb3-uni-phy", + .data = &sdx65_usb3_uniphy_cfg, + }, { .compatible = "qcom,sm8350-qmp-usb3-phy", .data = &sm8350_usb3phy_cfg, }, { diff --git a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c index 4df9476ef2a9..639452f47869 100644 --- a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c +++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c @@ -327,7 +327,6 @@ static int rk_dphy_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; const struct rk_dphy_drv_data *drv_data; struct phy_provider *phy_provider; - const struct of_device_id *of_id; struct rk_dphy *priv; struct phy *phy; unsigned int i; @@ -347,11 +346,7 @@ static int rk_dphy_probe(struct platform_device *pdev) return -ENODEV; } - of_id = of_match_device(rk_dphy_dt_ids, dev); - if (!of_id) - return -EINVAL; - - drv_data = of_id->data; + drv_data = of_device_get_match_data(dev); priv->drv_data = drv_data; priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks, sizeof(*priv->clks), GFP_KERNEL); diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index eca77e44a4c1..6711659f727c 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -116,11 +116,15 @@ struct rockchip_chg_det_reg { * 
@bvalid_det_en: vbus valid rise detection enable register. * @bvalid_det_st: vbus valid rise detection status register. * @bvalid_det_clr: vbus valid rise detection clear register. + * @id_det_en: id detection enable register. + * @id_det_st: id detection state register. + * @id_det_clr: id detection clear register. * @ls_det_en: linestate detection enable register. * @ls_det_st: linestate detection state register. * @ls_det_clr: linestate detection clear register. * @utmi_avalid: utmi vbus avalid status register. * @utmi_bvalid: utmi vbus bvalid status register. + * @utmi_id: utmi id state register. * @utmi_ls: utmi linestate state register. * @utmi_hstdet: utmi host disconnect register. */ @@ -129,11 +133,15 @@ struct rockchip_usb2phy_port_cfg { struct usb2phy_reg bvalid_det_en; struct usb2phy_reg bvalid_det_st; struct usb2phy_reg bvalid_det_clr; + struct usb2phy_reg id_det_en; + struct usb2phy_reg id_det_st; + struct usb2phy_reg id_det_clr; struct usb2phy_reg ls_det_en; struct usb2phy_reg ls_det_st; struct usb2phy_reg ls_det_clr; struct usb2phy_reg utmi_avalid; struct usb2phy_reg utmi_bvalid; + struct usb2phy_reg utmi_id; struct usb2phy_reg utmi_ls; struct usb2phy_reg utmi_hstdet; }; @@ -161,6 +169,7 @@ struct rockchip_usb2phy_cfg { * @suspended: phy suspended flag. * @vbus_attached: otg device vbus status. * @bvalid_irq: IRQ number assigned for vbus valid rise detection. + * @id_irq: IRQ number assigned for ID pin detection. * @ls_irq: IRQ number assigned for linestate detection. * @otg_mux_irq: IRQ number which multiplex otg-id/otg-bvalid/linestate * irqs to one irq in otg-port. @@ -179,6 +188,7 @@ struct rockchip_usb2phy_port { bool suspended; bool vbus_attached; int bvalid_irq; + int id_irq; int ls_irq; int otg_mux_irq; struct mutex mutex; @@ -253,7 +263,7 @@ static inline bool property_enabled(struct regmap *base, return false; tmp = (orig & mask) >> reg->bitstart; - return tmp == reg->enable; + return tmp != reg->disable; } static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw) @@ -419,6 +429,19 @@ static int rockchip_usb2phy_init(struct phy *phy) if (ret) goto out; + /* clear id status and enable id detect irq */ + ret = property_enable(rphy->grf, + &rport->port_cfg->id_det_clr, + true); + if (ret) + goto out; + + ret = property_enable(rphy->grf, + &rport->port_cfg->id_det_en, + true); + if (ret) + goto out; + schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY * 3); } else { @@ -905,27 +928,40 @@ static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data) if (!property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st)) return IRQ_NONE; - mutex_lock(&rport->mutex); - /* clear bvalid detect irq pending status */ property_enable(rphy->grf, &rport->port_cfg->bvalid_det_clr, true); - mutex_unlock(&rport->mutex); - rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work); return IRQ_HANDLED; } -static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data) +static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data) { struct rockchip_usb2phy_port *rport = data; struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent); + bool id; - if (property_enabled(rphy->grf, &rport->port_cfg->bvalid_det_st)) - return rockchip_usb2phy_bvalid_irq(irq, data); - else + if (!property_enabled(rphy->grf, &rport->port_cfg->id_det_st)) return IRQ_NONE; + + /* clear id detect irq pending status */ + property_enable(rphy->grf, &rport->port_cfg->id_det_clr, true); + + id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id); + 
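/*
 * Editor's note on the rockchip-inno-usb2 changes above and the cfg
 * tables below: each usb2phy_reg tuple reads { offset, bitend,
 * bitstart, disable, enable }, and several entries widen from one bit
 * (e.g. { 0x0110, 2, 2, 0, 1 }) to two (e.g. { 0x0110, 3, 2, 0, 3 }),
 * which is why property_enabled() now tests the field against
 * ->disable instead of requiring an exact ->enable match. A small
 * sketch of the field extraction, assuming GENMASK() from
 * <linux/bits.h>; the helper name is hypothetical and not part of the
 * merged patches.
 */
static inline u32 usb2phy_reg_field(u32 val, u32 bitend, u32 bitstart)
{
	u32 mask = GENMASK(bitend, bitstart);

	return (val & mask) >> bitstart;	/* enabled iff != ->disable */
}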
extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id); + + return IRQ_HANDLED; +} + +static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data) +{ + irqreturn_t ret = IRQ_NONE; + + ret |= rockchip_usb2phy_bvalid_irq(irq, data); + ret |= rockchip_usb2phy_id_irq(irq, data); + + return ret; } static irqreturn_t rockchip_usb2phy_irq(int irq, void *data) @@ -940,8 +976,14 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data) if (!rport->phy) continue; - /* Handle linestate irq for both otg port and host port */ - ret = rockchip_usb2phy_linestate_irq(irq, rport); + switch (rport->port_id) { + case USB2PHY_PORT_OTG: + ret |= rockchip_usb2phy_otg_mux_irq(irq, rport); + break; + case USB2PHY_PORT_HOST: + ret |= rockchip_usb2phy_linestate_irq(irq, rport); + break; + } } return ret; @@ -1015,6 +1057,25 @@ static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy, "failed to request otg-bvalid irq handle\n"); return ret; } + + rport->id_irq = of_irq_get_byname(child_np, "otg-id"); + if (rport->id_irq < 0) { + dev_err(rphy->dev, "no otg-id irq provided\n"); + ret = rport->id_irq; + return ret; + } + + ret = devm_request_threaded_irq(rphy->dev, rport->id_irq, + NULL, + rockchip_usb2phy_id_irq, + IRQF_ONESHOT, + "rockchip_usb2phy_id", + rport); + if (ret) { + dev_err(rphy->dev, + "failed to request otg-id irq handle\n"); + return ret; + } } break; default: @@ -1139,8 +1200,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev) else { rphy->grf = syscon_node_to_regmap(dev->parent->of_node); - if (IS_ERR(rphy->grf)) - return PTR_ERR(rphy->grf); + if (IS_ERR(rphy->grf)) + return PTR_ERR(rphy->grf); } if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) { @@ -1289,10 +1350,14 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = { .bvalid_det_en = { 0x0680, 3, 3, 0, 1 }, .bvalid_det_st = { 0x0690, 3, 3, 0, 1 }, .bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 }, + .id_det_en = { 0x0680, 6, 5, 0, 3 }, + .id_det_st = { 0x0690, 6, 5, 0, 3 }, + .id_det_clr = { 0x06a0, 6, 5, 0, 3 }, .ls_det_en = { 0x0680, 2, 2, 0, 1 }, .ls_det_st = { 0x0690, 2, 2, 0, 1 }, .ls_det_clr = { 0x06a0, 2, 2, 0, 1 }, .utmi_bvalid = { 0x0480, 4, 4, 0, 1 }, + .utmi_id = { 0x0480, 1, 1, 0, 1 }, .utmi_ls = { 0x0480, 3, 2, 0, 1 }, }, [USB2PHY_PORT_HOST] = { @@ -1345,14 +1410,18 @@ static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = { .port_cfgs = { [USB2PHY_PORT_OTG] = { .phy_sus = { 0x0100, 8, 0, 0, 0x1d1 }, - .bvalid_det_en = { 0x3020, 2, 2, 0, 1 }, - .bvalid_det_st = { 0x3024, 2, 2, 0, 1 }, - .bvalid_det_clr = { 0x3028, 2, 2, 0, 1 }, + .bvalid_det_en = { 0x3020, 3, 2, 0, 3 }, + .bvalid_det_st = { 0x3024, 3, 2, 0, 3 }, + .bvalid_det_clr = { 0x3028, 3, 2, 0, 3 }, + .id_det_en = { 0x3020, 5, 4, 0, 3 }, + .id_det_st = { 0x3024, 5, 4, 0, 3 }, + .id_det_clr = { 0x3028, 5, 4, 0, 3 }, .ls_det_en = { 0x3020, 0, 0, 0, 1 }, .ls_det_st = { 0x3024, 0, 0, 0, 1 }, .ls_det_clr = { 0x3028, 0, 0, 0, 1 }, .utmi_avalid = { 0x0120, 10, 10, 0, 1 }, .utmi_bvalid = { 0x0120, 9, 9, 0, 1 }, + .utmi_id = { 0x0120, 6, 6, 0, 1 }, .utmi_ls = { 0x0120, 5, 4, 0, 1 }, }, [USB2PHY_PORT_HOST] = { @@ -1388,14 +1457,18 @@ static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = { .port_cfgs = { [USB2PHY_PORT_OTG] = { .phy_sus = { 0x0100, 15, 0, 0, 0x1d1 }, - .bvalid_det_en = { 0x0110, 2, 2, 0, 1 }, - .bvalid_det_st = { 0x0114, 2, 2, 0, 1 }, - .bvalid_det_clr = { 0x0118, 2, 2, 0, 1 }, + .bvalid_det_en = { 0x0110, 3, 2, 0, 3 }, + .bvalid_det_st = { 0x0114, 3, 2, 0, 3 }, + .bvalid_det_clr = { 0x0118, 3, 2, 0, 
3 }, + .id_det_en = { 0x0110, 5, 4, 0, 3 }, + .id_det_st = { 0x0114, 5, 4, 0, 3 }, + .id_det_clr = { 0x0118, 5, 4, 0, 3 }, .ls_det_en = { 0x0110, 0, 0, 0, 1 }, .ls_det_st = { 0x0114, 0, 0, 0, 1 }, .ls_det_clr = { 0x0118, 0, 0, 0, 1 }, .utmi_avalid = { 0x0120, 10, 10, 0, 1 }, .utmi_bvalid = { 0x0120, 9, 9, 0, 1 }, + .utmi_id = { 0x0120, 6, 6, 0, 1 }, .utmi_ls = { 0x0120, 5, 4, 0, 1 }, }, [USB2PHY_PORT_HOST] = { @@ -1453,8 +1526,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = { .bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 }, .bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 }, .bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 }, + .id_det_en = { 0xe3c0, 5, 4, 0, 3 }, + .id_det_st = { 0xe3e0, 5, 4, 0, 3 }, + .id_det_clr = { 0xe3d0, 5, 4, 0, 3 }, .utmi_avalid = { 0xe2ac, 7, 7, 0, 1 }, .utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 }, + .utmi_id = { 0xe2ac, 8, 8, 0, 1 }, }, [USB2PHY_PORT_HOST] = { .phy_sus = { 0xe458, 1, 0, 0x2, 0x1 }, @@ -1488,8 +1565,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = { .bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 }, .bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 }, .bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 }, + .id_det_en = { 0xe3c0, 10, 9, 0, 3 }, + .id_det_st = { 0xe3e0, 10, 9, 0, 3 }, + .id_det_clr = { 0xe3d0, 10, 9, 0, 3 }, .utmi_avalid = { 0xe2ac, 10, 10, 0, 1 }, .utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 }, + .utmi_id = { 0xe2ac, 11, 11, 0, 1 }, }, [USB2PHY_PORT_HOST] = { .phy_sus = { 0xe468, 1, 0, 0x2, 0x1 }, @@ -1512,11 +1593,15 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = { .port_cfgs = { [USB2PHY_PORT_OTG] = { .phy_sus = { 0x0000, 8, 0, 0, 0x1d1 }, - .bvalid_det_en = { 0x0080, 2, 2, 0, 1 }, - .bvalid_det_st = { 0x0084, 2, 2, 0, 1 }, - .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 }, + .bvalid_det_en = { 0x0080, 3, 2, 0, 3 }, + .bvalid_det_st = { 0x0084, 3, 2, 0, 3 }, + .bvalid_det_clr = { 0x0088, 3, 2, 0, 3 }, + .id_det_en = { 0x0080, 5, 4, 0, 3 }, + .id_det_st = { 0x0084, 5, 4, 0, 3 }, + .id_det_clr = { 0x0088, 5, 4, 0, 3 }, .utmi_avalid = { 0x00c0, 10, 10, 0, 1 }, .utmi_bvalid = { 0x00c0, 9, 9, 0, 1 }, + .utmi_id = { 0x00c0, 6, 6, 0, 1 }, }, [USB2PHY_PORT_HOST] = { /* Select suspend control from controller */ diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index d2bbdc96a167..d76440ae10ff 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c @@ -1105,15 +1105,14 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct resource *res; const struct rockchip_usb3phy_port_cfg *phy_cfgs; - const struct of_device_id *match; int index, ret; tcphy = devm_kzalloc(dev, sizeof(*tcphy), GFP_KERNEL); if (!tcphy) return -ENOMEM; - match = of_match_device(dev->driver->of_match_table, dev); - if (!match || !match->data) { + phy_cfgs = of_device_get_match_data(dev); + if (!phy_cfgs) { dev_err(dev, "phy configs are not assigned!\n"); return -EINVAL; } @@ -1123,7 +1122,6 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev) if (IS_ERR(tcphy->base)) return PTR_ERR(tcphy->base); - phy_cfgs = match->data; /* find out a proper config which can be matched with dt. 
*/ index = 0; while (phy_cfgs[index].reg) { diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 19746e658a6a..15219ed43ce9 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -865,17 +865,20 @@ static int axp288_charger_probe(struct platform_device *pdev) info->regmap_irqc = axp20x->regmap_irqc; info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME); - if (info->cable.edev == NULL) { - dev_dbg(dev, "%s is not ready, probe deferred\n", - AXP288_EXTCON_DEV_NAME); - return -EPROBE_DEFER; + if (IS_ERR(info->cable.edev)) { + dev_err_probe(dev, PTR_ERR(info->cable.edev), + "extcon_get_extcon_dev(%s) failed\n", + AXP288_EXTCON_DEV_NAME); + return PTR_ERR(info->cable.edev); } if (acpi_dev_present(USB_HOST_EXTCON_HID, NULL, -1)) { info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_NAME); - if (info->otg.cable == NULL) { - dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n"); - return -EPROBE_DEFER; + if (IS_ERR(info->otg.cable)) { + dev_err_probe(dev, PTR_ERR(info->otg.cable), + "extcon_get_extcon_dev(%s) failed\n", + USB_HOST_EXTCON_NAME); + return PTR_ERR(info->otg.cable); } dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n"); } diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index d67edb760c94..92db79400a6a 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -985,13 +985,10 @@ static int charger_extcon_init(struct charger_manager *cm, cable->nb.notifier_call = charger_extcon_notifier; cable->extcon_dev = extcon_get_extcon_dev(cable->extcon_name); - if (IS_ERR_OR_NULL(cable->extcon_dev)) { + if (IS_ERR(cable->extcon_dev)) { pr_err("Cannot find extcon_dev for %s (cable: %s)\n", cable->extcon_name, cable->name); - if (cable->extcon_dev == NULL) - return -EPROBE_DEFER; - else - return PTR_ERR(cable->extcon_dev); + return PTR_ERR(cable->extcon_dev); } for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) { diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c index 127c73b0b3bd..1ec3535a257d 100644 --- a/drivers/power/supply/max8997_charger.c +++ b/drivers/power/supply/max8997_charger.c @@ -242,10 +242,10 @@ static int max8997_battery_probe(struct platform_device *pdev) dev_info(&pdev->dev, "couldn't get charger regulator\n"); } charger->edev = extcon_get_extcon_dev("max8997-muic"); - if (IS_ERR_OR_NULL(charger->edev)) { - if (!charger->edev) - return -EPROBE_DEFER; - dev_info(charger->dev, "couldn't get extcon device\n"); + if (IS_ERR(charger->edev)) { + dev_err_probe(charger->dev, PTR_ERR(charger->edev), + "couldn't get extcon device: max8997-muic\n"); + return PTR_ERR(charger->edev); } if (!IS_ERR(charger->reg) && !IS_ERR_OR_NULL(charger->edev)) { diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c index ec58091fc948..c0c4f895d76e 100644 --- a/drivers/slimbus/qcom-ctrl.c +++ b/drivers/slimbus/qcom-ctrl.c @@ -510,10 +510,8 @@ static int qcom_slim_probe(struct platform_device *pdev) } ctrl->irq = platform_get_irq(pdev, 0); - if (ctrl->irq < 0) { - dev_err(&pdev->dev, "no slimbus IRQ\n"); + if (ctrl->irq < 0) return ctrl->irq; - } sctrl = &ctrl->ctrl; sctrl->dev = &pdev->dev; diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 7040293c2ee8..0f29a08b4c09 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1526,13 +1526,11 @@ static int 
qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) if (IS_ERR(ctrl->base)) return PTR_ERR(ctrl->base); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&pdev->dev, "no slimbus IRQ resource\n"); - return -ENODEV; - } + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; - ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt, + ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt, IRQF_TRIGGER_HIGH, "slim-ngd", ctrl); if (ret) { dev_err(&pdev->dev, "request IRQ failed\n"); diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index b27f8853508e..5dcb7665fe22 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -41,25 +41,37 @@ static int event_manager_availability = -EACCES; static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER); static int sgi_num = XLNX_EVENT_SGI_NUM; +static bool is_need_to_unregister; + +/** + * struct agent_cb - Registered callback function and private data. + * @agent_data: Data passed back to handler function. + * @eve_cb: Function pointer to store the callback function. + * @list: member to create list. + */ +struct agent_cb { + void *agent_data; + event_cb_func_t eve_cb; + struct list_head list; +}; + /** * struct registered_event_data - Registered Event Data. * @key: key is the combine id(Node-Id | Event-Id) of type u64 * where upper u32 for Node-Id and lower u32 for Event-Id, * And this used as key to index into hashmap. - * @agent_data: Data passed back to handler function. * @cb_type: Type of Api callback, like PM_NOTIFY_CB, etc. - * @eve_cb: Function pointer to store the callback function. - * @wake: If this flag set, firmware will wakeup processor if is + * @wake: If this flag set, firmware will wake up processor if is * in sleep or power down state. + * @cb_list_head: Head of call back data list which contain the information + * about registered handler and private data. * @hentry: hlist_node that hooks this entry into hashtable. 
*/ struct registered_event_data { u64 key; enum pm_api_cb_id cb_type; - void *agent_data; - - event_cb_func_t eve_cb; bool wake; + struct list_head cb_list_head; struct hlist_node hentry; }; @@ -78,29 +90,60 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons event_cb_func_t cb_fun, void *data) { u64 key = 0; + bool present_in_hash = false; struct registered_event_data *eve_data; + struct agent_cb *cb_data; + struct agent_cb *cb_pos; + struct agent_cb *cb_next; key = ((u64)node_id << 32U) | (u64)event; /* Check for existing entry in hash table for given key id */ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { if (eve_data->key == key) { - pr_err("Found as already registered\n"); - return -EINVAL; + present_in_hash = true; + break; } } - /* Add new entry if not present */ - eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL); - if (!eve_data) - return -ENOMEM; + if (!present_in_hash) { + /* Add new entry if not present in HASH table */ + eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL); + if (!eve_data) + return -ENOMEM; + eve_data->key = key; + eve_data->cb_type = PM_NOTIFY_CB; + eve_data->wake = wake; + INIT_LIST_HEAD(&eve_data->cb_list_head); + + cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); + if (!cb_data) + return -ENOMEM; + cb_data->eve_cb = cb_fun; + cb_data->agent_data = data; + + /* Add into callback list */ + list_add(&cb_data->list, &eve_data->cb_list_head); + + /* Add into HASH table */ + hash_add(reg_driver_map, &eve_data->hentry, key); + } else { + /* Search for callback function and private data in list */ + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { + if (cb_pos->eve_cb == cb_fun && + cb_pos->agent_data == data) { + return 0; + } + } - eve_data->key = key; - eve_data->cb_type = PM_NOTIFY_CB; - eve_data->eve_cb = cb_fun; - eve_data->wake = wake; - eve_data->agent_data = data; + /* Add multiple handler and private data in list */ + cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); + if (!cb_data) + return -ENOMEM; + cb_data->eve_cb = cb_fun; + cb_data->agent_data = data; - hash_add(reg_driver_map, &eve_data->hentry, key); + list_add(&cb_data->list, &eve_data->cb_list_head); + } return 0; } @@ -108,6 +151,7 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data) { struct registered_event_data *eve_data; + struct agent_cb *cb_data; /* Check for existing entry in hash table for given cb_type */ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { @@ -124,8 +168,16 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data) eve_data->key = 0; eve_data->cb_type = PM_INIT_SUSPEND_CB; - eve_data->eve_cb = cb_fun; - eve_data->agent_data = data; + INIT_LIST_HEAD(&eve_data->cb_list_head); + + cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); + if (!cb_data) + return -ENOMEM; + cb_data->eve_cb = cb_fun; + cb_data->agent_data = data; + + /* Add into callback list */ + list_add(&cb_data->list, &eve_data->cb_list_head); hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB); @@ -136,15 +188,26 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) { bool is_callback_found = false; struct registered_event_data *eve_data; + struct agent_cb *cb_pos; + struct agent_cb *cb_next; + + is_need_to_unregister = false; /* Check for existing entry in hash table for given cb_type */ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { - if 
(eve_data->cb_type == PM_INIT_SUSPEND_CB && - eve_data->eve_cb == cb_fun) { - is_callback_found = true; + if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { + /* Delete the list of callback */ + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { + if (cb_pos->eve_cb == cb_fun) { + is_callback_found = true; + list_del_init(&cb_pos->list); + kfree(cb_pos); + } + } /* remove an object from a hashtable */ hash_del(&eve_data->hentry); kfree(eve_data); + is_need_to_unregister = true; } } if (!is_callback_found) { @@ -156,20 +219,36 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) } static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event, - event_cb_func_t cb_fun) + event_cb_func_t cb_fun, void *data) { bool is_callback_found = false; struct registered_event_data *eve_data; u64 key = ((u64)node_id << 32U) | (u64)event; + struct agent_cb *cb_pos; + struct agent_cb *cb_next; + + is_need_to_unregister = false; /* Check for existing entry in hash table for given key id */ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { - if (eve_data->key == key && - eve_data->eve_cb == cb_fun) { - is_callback_found = true; - /* remove an object from a hashtable */ - hash_del(&eve_data->hentry); - kfree(eve_data); + if (eve_data->key == key) { + /* Delete the list of callback */ + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { + if (cb_pos->eve_cb == cb_fun && + cb_pos->agent_data == data) { + is_callback_found = true; + list_del_init(&cb_pos->list); + kfree(cb_pos); + } + } + + /* Remove HASH table if callback list is empty */ + if (list_empty(&eve_data->cb_list_head)) { + /* remove an object from a HASH table */ + hash_del(&eve_data->hentry); + kfree(eve_data); + is_need_to_unregister = true; + } } } if (!is_callback_found) { @@ -241,7 +320,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons eve = event & (1 << pos); if (!eve) continue; - xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); } } } @@ -263,10 +342,10 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons eve = event & (1 << pos); if (!eve) continue; - xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); } } else { - xlnx_remove_cb_for_notify_event(node_id, event, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); } return ret; } @@ -284,15 +363,18 @@ EXPORT_SYMBOL_GPL(xlnx_register_event); * @node_id: Node-Id related to event. * @event: Event Mask for the Error Event. * @cb_fun: Function pointer of callback function. + * @data: Pointer of agent's private data. * * Return: Returns 0 on successful unregistration else error code. 
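/*
 * Editor's note on the xlnx_event_manager rework above: the hash key
 * packs both identifiers, so a single (node, event) pair maps to one
 * hash entry and the new agent_cb list hangs every subscriber off that
 * entry. That is also why unregistration now takes the private data
 * too - cb_fun alone no longer identifies a unique subscriber. A
 * one-line sketch of the key packing, mirroring the driver; the helper
 * name is hypothetical and not part of the merged patches.
 */
static inline u64 xlnx_event_key(u32 node_id, u32 event)
{
	return ((u64)node_id << 32) | event;	/* upper 32: node, lower 32: event */
}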
*/ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, - event_cb_func_t cb_fun) + event_cb_func_t cb_fun, void *data) { - int ret; + int ret = 0; u32 eve, pos; + is_need_to_unregister = false; + if (event_manager_availability) return event_manager_availability; @@ -309,23 +391,26 @@ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, co } else { /* Remove Node-Id/Event from hash table */ if (!xlnx_is_error_event(node_id)) { - xlnx_remove_cb_for_notify_event(node_id, event, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); } else { for (pos = 0; pos < MAX_BITS; pos++) { eve = event & (1 << pos); if (!eve) continue; - xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); } } - /* Un-register for Node-Id/Event combination */ - ret = zynqmp_pm_register_notifier(node_id, event, false, false); - if (ret) { - pr_err("%s() failed for 0x%x and 0x%x: %d\n", - __func__, node_id, event, ret); - return ret; + /* Un-register if list is empty */ + if (is_need_to_unregister) { + /* Un-register for Node-Id/Event combination */ + ret = zynqmp_pm_register_notifier(node_id, event, false, false); + if (ret) { + pr_err("%s() failed for 0x%x and 0x%x: %d\n", + __func__, node_id, event, ret); + return ret; + } } } @@ -338,12 +423,16 @@ static void xlnx_call_suspend_cb_handler(const u32 *payload) bool is_callback_found = false; struct registered_event_data *eve_data; u32 cb_type = payload[0]; + struct agent_cb *cb_pos; + struct agent_cb *cb_next; /* Check for existing entry in hash table for given cb_type */ hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) { if (eve_data->cb_type == cb_type) { - eve_data->eve_cb(&payload[0], eve_data->agent_data); - is_callback_found = true; + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { + cb_pos->eve_cb(&payload[0], cb_pos->agent_data); + is_callback_found = true; + } } } if (!is_callback_found) @@ -356,12 +445,16 @@ static void xlnx_call_notify_cb_handler(const u32 *payload) struct registered_event_data *eve_data; u64 key = ((u64)payload[1] << 32U) | (u64)payload[2]; int ret; + struct agent_cb *cb_pos; + struct agent_cb *cb_next; /* Check for existing entry in hash table for given key id */ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { if (eve_data->key == key) { - eve_data->eve_cb(&payload[0], eve_data->agent_data); - is_callback_found = true; + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { + cb_pos->eve_cb(&payload[0], cb_pos->agent_data); + is_callback_found = true; + } /* re register with firmware to get future events */ ret = zynqmp_pm_register_notifier(payload[1], payload[2], @@ -369,9 +462,13 @@ static void xlnx_call_notify_cb_handler(const u32 *payload) if (ret) { pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, payload[1], payload[2], ret); - /* Remove already registered event from hash table */ - xlnx_remove_cb_for_notify_event(payload[1], payload[2], - eve_data->eve_cb); + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, + list) { + /* Remove already registered event from hash table */ + xlnx_remove_cb_for_notify_event(payload[1], payload[2], + cb_pos->eve_cb, + cb_pos->agent_data); + } } } } @@ -572,8 +669,14 @@ static int xlnx_event_manager_remove(struct platform_device *pdev) struct registered_event_data *eve_data; struct hlist_node *tmp; int ret; + struct agent_cb *cb_pos; + struct agent_cb 
*cb_next; hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) { + list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { + list_del_init(&cb_pos->list); + kfree(cb_pos); + } hash_del(&eve_data->hentry); kfree(eve_data); } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 859dd31b6eff..78a8a7545d1e 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -208,7 +208,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev) GFP_KERNEL); if (!zynqmp_pm_init_suspend_work) { xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, - suspend_event_callback); + suspend_event_callback, NULL); return -ENOMEM; } event_registered = true; @@ -263,7 +263,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev) ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); if (ret) { if (event_registered) { - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback); + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, + NULL); event_registered = false; } dev_err(&pdev->dev, "unable to create sysfs interface\n"); @@ -277,7 +278,7 @@ static int zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); if (event_registered) - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback); + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL); if (!rx_chan) mbox_free_channel(rx_chan); diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 354d3f89366f..a2bfb0434a67 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -536,11 +536,9 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val) { int ret; - ret = pm_runtime_get_sync(&slave->dev); - if (ret < 0 && ret != -EACCES) { - pm_runtime_put_noidle(&slave->dev); + ret = pm_runtime_resume_and_get(&slave->dev); + if (ret < 0 && ret != -EACCES) return ret; - } ret = sdw_nread_no_pm(slave, addr, count, val); @@ -562,11 +560,9 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val) { int ret; - ret = pm_runtime_get_sync(&slave->dev); - if (ret < 0 && ret != -EACCES) { - pm_runtime_put_noidle(&slave->dev); + ret = pm_runtime_resume_and_get(&slave->dev); + if (ret < 0 && ret != -EACCES) return ret; - } ret = sdw_nwrite_no_pm(slave, addr, count, val); @@ -1506,10 +1502,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) sdw_modify_slave_status(slave, SDW_SLAVE_ALERT); - ret = pm_runtime_get_sync(&slave->dev); + ret = pm_runtime_resume_and_get(&slave->dev); if (ret < 0 && ret != -EACCES) { dev_err(&slave->dev, "Failed to resume device: %d\n", ret); - pm_runtime_put_noidle(&slave->dev); return ret; } @@ -1838,6 +1833,18 @@ int sdw_handle_slave_status(struct sdw_bus *bus, __func__, slave->dev_num); complete(&slave->initialization_complete); + + /* + * If the manager became pm_runtime active, the peripherals will be + * restarted and attach, but their pm_runtime status may remain + * suspended. If the 'update_slave_status' callback initiates + * any sort of deferred processing, this processing would not be + * cancelled on pm_runtime suspend. + * To avoid such zombie states, we queue a request to resume. + * This would be a no-op in case the peripheral was being resumed + * by e.g. the ALSA/ASoC framework. 
+ */ + pm_request_resume(&slave->dev); } } diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 558390af44b6..4fbb19557f5e 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -386,12 +386,11 @@ static int cdns_parity_error_injection(void *data, u64 value) * Resume Master device. If this results in a bus reset, the * Slave devices will re-attach and be re-enumerated. */ - ret = pm_runtime_get_sync(bus->dev); + ret = pm_runtime_resume_and_get(bus->dev); if (ret < 0 && ret != -EACCES) { dev_err_ratelimited(cdns->dev, - "pm_runtime_get_sync failed in %s, ret %d\n", + "pm_runtime_resume_and_get failed in %s, ret %d\n", __func__, ret); - pm_runtime_put_noidle(bus->dev); return ret; } @@ -959,6 +958,8 @@ static void cdns_update_slave_status_work(struct work_struct *work) container_of(work, struct sdw_cdns, work); u32 slave0, slave1; u64 slave_intstat; + u32 device0_status; + int retry_count = 0; slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0); slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1); @@ -968,10 +969,45 @@ static void cdns_update_slave_status_work(struct work_struct *work) dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat); +update_status: cdns_update_slave_status(cdns, slave_intstat); cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0); cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1); + /* + * When there is more than one peripheral per link, it's + * possible that a deviceB becomes attached after we deal with + * the attachment of deviceA. Since the hardware does a + * logical AND, the attachment of the second device does not + * change the status seen by the driver. + * + * In that case, clearing the registers above would result in + * the deviceB never being detected - until a change of status + * is observed on the bus. + * + * To avoid this race condition, re-check if any device0 needs + * attention with PING commands. There is no need to check for + * ALERTS since they are not allowed until a non-zero + * device_number is assigned. 
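/*
 * Editor's note, not part of the merged patches: the soundwire
 * conversions above and below all rely on pm_runtime_resume_and_get()
 * folding the usage-count drop on failure into the helper itself, i.e.
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0)
 *		pm_runtime_put_noidle(dev);
 *
 * becomes a single pm_runtime_resume_and_get(dev) call, leaving only
 * the -EACCES (runtime PM disabled) special case at the call sites.
 * A minimal sketch; the function name is hypothetical.
 */
static int example_resume_device(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0 && ret != -EACCES)
		return ret;	/* usage count already dropped by the helper */
	return 0;
}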
+ */ + + device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT); + device0_status &= 3; + + if (device0_status == SDW_SLAVE_ATTACHED) { + if (retry_count++ < SDW_MAX_DEVICES) { + dev_dbg_ratelimited(cdns->dev, + "Device0 detected after clearing status, iteration %d\n", + retry_count); + slave_intstat = CDNS_MCP_SLAVE_INTSTAT_ATTACHED; + goto update_status; + } else { + dev_err_ratelimited(cdns->dev, + "Device0 detected after %d iterations\n", + retry_count); + } + } + /* clear and unmask Slave interrupt now */ cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK); cdns_updatel(cdns, CDNS_MCP_INTMASK, diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 63101f1ba271..505c5ef061e3 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -799,12 +799,11 @@ static int intel_startup(struct snd_pcm_substream *substream, struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); int ret; - ret = pm_runtime_get_sync(cdns->dev); + ret = pm_runtime_resume_and_get(cdns->dev); if (ret < 0 && ret != -EACCES) { dev_err_ratelimited(cdns->dev, - "pm_runtime_get_sync failed in %s, ret %d\n", + "pm_runtime_resume_and_get failed in %s, ret %d\n", __func__, ret); - pm_runtime_put_noidle(cdns->dev); return ret; } return 0; @@ -1293,6 +1292,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev, /* use generic bandwidth allocation algorithm */ sdw->cdns.bus.compute_params = sdw_compute_params; + /* avoid resuming from pm_runtime suspend if it's not required */ + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + ret = sdw_bus_master_add(bus, dev, dev->fwnode); if (ret) { dev_err(dev, "sdw_bus_master_add fail: %d\n", ret); @@ -1828,6 +1830,9 @@ static int __maybe_unused intel_resume_runtime(struct device *dev) return 0; } + /* unconditionally disable WAKEEN interrupt */ + intel_shim_wake(sdw, false); + link_flags = md_flags >> (bus->link_id * 8); multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index da1ad7ebb1aa..22b706350ead 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -105,7 +105,7 @@ #define SWRM_SPECIAL_CMD_ID 0xF #define MAX_FREQ_NUM 1 -#define TIMEOUT_MS (2 * HZ) +#define TIMEOUT_MS 100 #define QCOM_SWRM_MAX_RD_LEN 0x1 #define QCOM_SDW_MAX_PORTS 14 #define DEFAULT_CLK_FREQ 9600000 @@ -510,12 +510,12 @@ static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id) struct qcom_swrm_ctrl *swrm = dev_id; int ret; - ret = pm_runtime_get_sync(swrm->dev); + ret = pm_runtime_resume_and_get(swrm->dev); if (ret < 0 && ret != -EACCES) { dev_err_ratelimited(swrm->dev, - "pm_runtime_get_sync failed in %s, ret %d\n", + "pm_runtime_resume_and_get failed in %s, ret %d\n", __func__, ret); - pm_runtime_put_noidle(swrm->dev); + return ret; } if (swrm->wake_irq > 0) { @@ -1058,12 +1058,11 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *codec_dai; int ret, i; - ret = pm_runtime_get_sync(ctrl->dev); + ret = pm_runtime_resume_and_get(ctrl->dev); if (ret < 0 && ret != -EACCES) { dev_err_ratelimited(ctrl->dev, - "pm_runtime_get_sync failed in %s, ret %d\n", + "pm_runtime_resume_and_get failed in %s, ret %d\n", __func__, ret); - pm_runtime_put_noidle(ctrl->dev); return ret; } @@ -1252,12 +1251,12 @@ static int swrm_reg_show(struct seq_file *s_file, void *data) struct qcom_swrm_ctrl *swrm = s_file->private; int reg, reg_val, ret; - ret = pm_runtime_get_sync(swrm->dev); + ret = pm_runtime_resume_and_get(swrm->dev); if (ret < 
@@ -1252,12 +1251,12 @@ static int swrm_reg_show(struct seq_file *s_file, void *data)
 	struct qcom_swrm_ctrl *swrm = s_file->private;
 	int reg, reg_val, ret;
 
-	ret = pm_runtime_get_sync(swrm->dev);
+	ret = pm_runtime_resume_and_get(swrm->dev);
 	if (ret < 0 && ret != -EACCES) {
 		dev_err_ratelimited(swrm->dev,
-				    "pm_runtime_get_sync failed in %s, ret %d\n",
+				    "pm_runtime_resume_and_get failed in %s, ret %d\n",
 				    __func__, ret);
-		pm_runtime_put_noidle(swrm->dev);
+
 		return ret;
 	}
 
 	for (reg = 0; reg <= SWR_MSTR_MAX_REG_ADDR; reg += 4) {
@@ -1452,7 +1451,7 @@ static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
 	} while (retry--);
 
 	dev_err(swrm->dev, "%s: link status not %s\n", __func__,
-		comp_sts && SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
+		comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
 
 	return false;
 }
@@ -1549,6 +1548,7 @@ static const struct dev_pm_ops swrm_dev_pm_ops = {
 static const struct of_device_id qcom_swrm_of_match[] = {
 	{ .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data },
 	{ .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
+	{ .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_5_data },
 	{/* sentinel */},
 };
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f273459b2023..d34150559142 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -822,6 +822,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
 		} else if (multi_link) {
 			dev_err(bus->dev,
 				"Post bank switch ops not implemented\n");
+			ret = -EINVAL;
 			goto error;
 		}
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 71c709771676..52b8957c19c9 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -290,7 +290,7 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
 	int ret, timeout = 10;
 	bool doit;
 
-	ret = strtobool(buf, &doit);
+	ret = kstrtobool(buf, &doit);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 793918e1c45f..f177b20f0f2d 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -749,7 +749,6 @@ static int ad5933_probe(struct i2c_client *client,
 	indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
 
 	ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
-					  INDIO_BUFFER_SOFTWARE,
 					  &ad5933_ring_setup_ops);
 	if (ret)
 		return ret;
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
index 89c0fc7b0cbc..8f39cc8bb034 100644
--- a/drivers/uio/uio_dfl.c
+++ b/drivers/uio/uio_dfl.c
@@ -45,9 +45,11 @@ static int uio_dfl_probe(struct dfl_device *ddev)
 }
 
 #define FME_FEATURE_ID_ETH_GROUP	0x10
+#define FME_FEATURE_ID_HSSI_SUBSYS	0x15
 
 static const struct dfl_device_id uio_dfl_ids[] = {
 	{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
+	{ FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
 	{ }
 };
 MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index ee0863c6553e..6e6ef8c0bc7e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -95,8 +95,8 @@ static int omap_otg_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	extcon = extcon_get_extcon_dev(config->extcon);
-	if (!extcon)
-		return -EPROBE_DEFER;
+	if (IS_ERR(extcon))
+		return PTR_ERR(extcon);
 
 	otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
 	if (!otg_dev)
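The one-character qcom.c fix in swrm_wait_for_frame_gen_enabled() above is easy to read past: SWRM_FRM_GEN_ENABLED is a nonzero mask, so the logical `comp_sts && SWRM_FRM_GEN_ENABLED` collapsed to `comp_sts != 0` and reported "connected" whenever any status bit happened to be set. A sketch of the difference, using an invented mask value and helper name rather than the real register layout:

        #include <linux/bits.h>
        #include <linux/types.h>

        #define FRM_GEN_ENABLED		BIT(20)	/* hypothetical bit position */

        static const char *frame_gen_state(u32 comp_sts)
        {
                /*
                 * Bitwise AND tests the one flag we care about; the old
                 * logical AND was true for any nonzero comp_sts.
                 */
                return (comp_sts & FRM_GEN_ENABLED) ? "connected" : "disconnected";
        }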
"linux,extcon-name", &name) == 0) { chip->extcon = extcon_get_extcon_dev(name); - if (!chip->extcon) - return -EPROBE_DEFER; + if (IS_ERR(chip->extcon)) + return PTR_ERR(chip->extcon); } chip->vbus = devm_regulator_get(chip->dev, "vbus"); diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c index e49bec8bc8a4..07035249a5e1 100644 --- a/drivers/virt/fsl_hypervisor.c +++ b/drivers/virt/fsl_hypervisor.c @@ -659,7 +659,6 @@ static int fsl_hv_open(struct inode *inode, struct file *filp) { struct doorbell_queue *dbq; unsigned long flags; - int ret = 0; dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL); if (!dbq) { @@ -676,7 +675,7 @@ static int fsl_hv_open(struct inode *inode, struct file *filp) filp->private_data = dbq; - return ret; + return 0; } /* diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index f6664fc9596a..0eb560fc0153 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c @@ -172,8 +172,9 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index) err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000); if (err < 0) { - pr_err("Failed to send command control message %x.%x: err=%d.\n", - value, index, err); + dev_err(&dev->udev->dev, + "Failed to send command control message %x.%x: err=%d.\n", + value, index, err); return err; } @@ -187,8 +188,9 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index) err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), MODE_CMD, VENDOR, value, index, NULL, 0, 1000); if (err < 0) { - pr_err("Failed to send mode control message %x.%x: err=%d.\n", - value, index, err); + dev_err(&dev->udev->dev, + "Failed to send mode control message %x.%x: err=%d.\n", + value, index, err); return err; } @@ -202,72 +204,68 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index) err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), COMM_CMD, VENDOR, value, index, NULL, 0, 1000); if (err < 0) { - pr_err("Failed to send control message %x.%x: err=%d.\n", - value, index, err); + dev_err(&dev->udev->dev, + "Failed to send control message %x.%x: err=%d.\n", + value, index, err); return err; } return err; } -static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) -{ - pr_info("%45s: %8x\n", str, buf[off]); -} - -static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count) +static void ds_dump_status(struct ds_device *ds_dev, unsigned char *buf, int count) { + struct device *dev = &ds_dev->udev->dev; int i; - dev_info(&dev->udev->dev, "ep_status=0x%x, count=%d, status=%*phC", - dev->ep[EP_STATUS], count, count, buf); + dev_info(dev, "ep_status=0x%x, count=%d, status=%*phC", + ds_dev->ep[EP_STATUS], count, count, buf); if (count >= 16) { - ds_print_msg(buf, "enable flag", 0); - ds_print_msg(buf, "1-wire speed", 1); - ds_print_msg(buf, "strong pullup duration", 2); - ds_print_msg(buf, "programming pulse duration", 3); - ds_print_msg(buf, "pulldown slew rate control", 4); - ds_print_msg(buf, "write-1 low time", 5); - ds_print_msg(buf, "data sample offset/write-0 recovery time", - 6); - ds_print_msg(buf, "reserved (test register)", 7); - ds_print_msg(buf, "device status flags", 8); - ds_print_msg(buf, "communication command byte 1", 9); - ds_print_msg(buf, "communication command byte 2", 10); - ds_print_msg(buf, "communication command buffer status", 11); - ds_print_msg(buf, 
"1-wire data output buffer status", 12); - ds_print_msg(buf, "1-wire data input buffer status", 13); - ds_print_msg(buf, "reserved", 14); - ds_print_msg(buf, "reserved", 15); + dev_dbg(dev, "enable flag: 0x%02x", buf[0]); + dev_dbg(dev, "1-wire speed: 0x%02x", buf[1]); + dev_dbg(dev, "strong pullup duration: 0x%02x", buf[2]); + dev_dbg(dev, "programming pulse duration: 0x%02x", buf[3]); + dev_dbg(dev, "pulldown slew rate control: 0x%02x", buf[4]); + dev_dbg(dev, "write-1 low time: 0x%02x", buf[5]); + dev_dbg(dev, "data sample offset/write-0 recovery time: 0x%02x", buf[6]); + dev_dbg(dev, "reserved (test register): 0x%02x", buf[7]); + dev_dbg(dev, "device status flags: 0x%02x", buf[8]); + dev_dbg(dev, "communication command byte 1: 0x%02x", buf[9]); + dev_dbg(dev, "communication command byte 2: 0x%02x", buf[10]); + dev_dbg(dev, "communication command buffer status: 0x%02x", buf[11]); + dev_dbg(dev, "1-wire data output buffer status: 0x%02x", buf[12]); + dev_dbg(dev, "1-wire data input buffer status: 0x%02x", buf[13]); + dev_dbg(dev, "reserved: 0x%02x", buf[14]); + dev_dbg(dev, "reserved: 0x%02x", buf[15]); } + for (i = 16; i < count; ++i) { if (buf[i] == RR_DETECT) { - ds_print_msg(buf, "new device detect", i); + dev_dbg(dev, "New device detect.\n"); continue; } - ds_print_msg(buf, "Result Register Value: ", i); + dev_dbg(dev, "Result Register Value: 0x%02x", buf[i]); if (buf[i] & RR_NRS) - pr_info("NRS: Reset no presence or ...\n"); + dev_dbg(dev, "NRS: Reset no presence or ...\n"); if (buf[i] & RR_SH) - pr_info("SH: short on reset or set path\n"); + dev_dbg(dev, "SH: short on reset or set path\n"); if (buf[i] & RR_APP) - pr_info("APP: alarming presence on reset\n"); + dev_dbg(dev, "APP: alarming presence on reset\n"); if (buf[i] & RR_VPP) - pr_info("VPP: 12V expected not seen\n"); + dev_dbg(dev, "VPP: 12V expected not seen\n"); if (buf[i] & RR_CMP) - pr_info("CMP: compare error\n"); + dev_dbg(dev, "CMP: compare error\n"); if (buf[i] & RR_CRC) - pr_info("CRC: CRC error detected\n"); + dev_dbg(dev, "CRC: CRC error detected\n"); if (buf[i] & RR_RDP) - pr_info("RDP: redirected page\n"); + dev_dbg(dev, "RDP: redirected page\n"); if (buf[i] & RR_EOS) - pr_info("EOS: end of search error\n"); + dev_dbg(dev, "EOS: end of search error\n"); } } -static int ds_recv_status(struct ds_device *dev, struct ds_status *st, - bool dump) +static int ds_recv_status(struct ds_device *dev, struct ds_status *st) { int count, err; @@ -281,14 +279,12 @@ static int ds_recv_status(struct ds_device *dev, struct ds_status *st, dev->st_buf, sizeof(dev->st_buf), &count, 1000); if (err < 0) { - pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n", - dev->ep[EP_STATUS], err); + dev_err(&dev->udev->dev, + "Failed to read 1-wire data from 0x%x: err=%d.\n", + dev->ep[EP_STATUS], err); return err; } - if (dump) - ds_dump_status(dev, dev->st_buf, count); - if (st && count >= sizeof(*st)) memcpy(st, dev->st_buf, sizeof(*st)); @@ -302,13 +298,15 @@ static void ds_reset_device(struct ds_device *dev) * the strong pullup. */ if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE)) - pr_err("ds_reset_device: Error allowing strong pullup\n"); + dev_err(&dev->udev->dev, + "%s: Error allowing strong pullup\n", __func__); /* Chip strong pullup time was cleared. 
@@ -302,13 +298,15 @@ static void ds_reset_device(struct ds_device *dev)
 	 * the strong pullup.
 	 */
 	if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
-		pr_err("ds_reset_device: Error allowing strong pullup\n");
+		dev_err(&dev->udev->dev,
+			"%s: Error allowing strong pullup\n", __func__);
 	/* Chip strong pullup time was cleared. */
 	if (dev->spu_sleep) {
 		/* lower 4 bits are 0, see ds_set_pullup */
 		u8 del = dev->spu_sleep>>4;
 
 		if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
-			pr_err("ds_reset_device: Error setting duration\n");
+			dev_err(&dev->udev->dev,
+				"%s: Error setting duration\n", __func__);
 	}
 }
 
@@ -329,9 +327,16 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
 	err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
 				buf, size, &count, 1000);
 	if (err < 0) {
+		int recv_len;
+
 		dev_info(&dev->udev->dev, "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
 		usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
-		ds_recv_status(dev, NULL, true);
+
+		/* status might tell us why endpoint is stuck? */
+		recv_len = ds_recv_status(dev, NULL);
+		if (recv_len >= 0)
+			ds_dump_status(dev, dev->st_buf, recv_len);
+
 		return err;
 	}
@@ -355,7 +360,7 @@ static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
 	count = 0;
 	err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
 	if (err < 0) {
-		pr_err("Failed to write 1-wire data to ep0x%x: "
+		dev_err(&dev->udev->dev, "Failed to write 1-wire data to ep0x%x: "
 			"err=%d.\n", dev->ep[EP_DATA_OUT], err);
 		return err;
 	}
@@ -377,7 +382,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
 		err = ds_send_control(dev, CTL_RESUME_EXE, 0);
 		if (err)
 			break;
-		err = ds_recv_status(dev, &st, false);
+		err = ds_recv_status(dev, &st);
 		if (err)
 			break;
@@ -424,7 +429,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
 	do {
 		st->status = 0;
-		err = ds_recv_status(dev, st, false);
+		err = ds_recv_status(dev, st);
 #if 0
 		if (err >= 0) {
 			int i;
@@ -437,7 +442,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
 	} while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
 
 	if (err >= 16 && st->status & ST_EPOF) {
-		pr_info("Resetting device after ST_EPOF.\n");
+		dev_info(&dev->udev->dev, "Resetting device after ST_EPOF.\n");
 		ds_reset_device(dev);
 		/* Always dump the device status. */
 		count = 101;
@@ -721,7 +726,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
 	do {
 		schedule_timeout(jtime);
 
-		err = ds_recv_status(dev, &st, false);
+		err = ds_recv_status(dev, &st);
 		if (err < 0 || err < sizeof(st))
 			break;
@@ -992,10 +997,9 @@ static int ds_probe(struct usb_interface *intf,
 	int i, err, alt;
 
 	dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
-	if (!dev) {
-		pr_info("Failed to allocate new DS9490R structure.\n");
+	if (!dev)
 		return -ENOMEM;
-	}
+
 	dev->udev = usb_get_dev(udev);
 	if (!dev->udev) {
 		err = -ENOMEM;
@@ -1025,7 +1029,7 @@ static int ds_probe(struct usb_interface *intf,
 	iface_desc = intf->cur_altsetting;
 	if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
-		pr_info("Num endpoints=%d. It is not DS9490R.\n",
+		dev_err(&dev->udev->dev, "Num endpoints=%d. It is not DS9490R.\n",
 			iface_desc->desc.bNumEndpoints);
 		err = -EINVAL;
 		goto err_out_clear;
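One last idiom worth noting: ds_probe() above stops printing its own message when kzalloc() fails. The memory-management core already logs allocation failures (unless __GFP_NOWARN is passed), so a per-callsite message is redundant, which is also what checkpatch flags. A sketch of the surviving pattern, with a made-up structure name standing in for the driver's own:

        #include <linux/slab.h>

        struct example_dev {
                int placeholder;	/* stand-in for the real ds_device fields */
        };

        static struct example_dev *example_alloc(void)
        {
                /* no "allocation failed" print: the allocator warns on failure */
                return kzalloc(sizeof(struct example_dev), GFP_KERNEL);
        }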