The MSC MON_SEL register needs to be accessed from hardirq for the
overflow interrupt, and when taking an IPI to access these registers on
platforms where the MSC is not accessible from every CPU. This makes an
irqsave spinlock the obvious lock to protect these registers. But on
systems with SCMI mailboxes, accesses must be able to sleep, meaning a
mutex must be used. The SCMI platforms can't support an overflow
interrupt. Clearly these two cases can't exist for one MSC at the same
time.

Add helpers for the MON_SEL locking. The outer lock must be taken in a
pre-emptible context before the inner lock can be taken. On systems with
SCMI mailboxes, where the MON_SEL accesses must sleep, the inner lock
will fail to be 'taken' if the caller is unable to sleep. This allows
callers to fail without having to explicitly check the interface type
of each MSC.

Signed-off-by: James Morse <james.morse@xxxxxxx>
---
Changes since v1:
 * Made accesses to outer_lock_held READ_ONCE() to avoid torn values in
   the failure case.
---
 drivers/resctrl/mpam_devices.c  |  3 +--
 drivers/resctrl/mpam_internal.h | 37 +++++++++++++++++++++++++++++----
 2 files changed, 34 insertions(+), 6 deletions(-)

diff --git a/drivers/resctrl/mpam_devices.c b/drivers/resctrl/mpam_devices.c
index 24dc81c15ec8..a26b012452e2 100644
--- a/drivers/resctrl/mpam_devices.c
+++ b/drivers/resctrl/mpam_devices.c
@@ -748,8 +748,7 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 
 	mutex_init(&msc->probe_lock);
 	mutex_init(&msc->part_sel_lock);
-	mutex_init(&msc->outer_mon_sel_lock);
-	raw_spin_lock_init(&msc->inner_mon_sel_lock);
+	mpam_mon_sel_lock_init(msc);
 	msc->id = pdev->id;
 	msc->pdev = pdev;
 	INIT_LIST_HEAD_RCU(&msc->all_msc_list);
diff --git a/drivers/resctrl/mpam_internal.h b/drivers/resctrl/mpam_internal.h
index 828ce93c95d5..4cc44d4e21c4 100644
--- a/drivers/resctrl/mpam_internal.h
+++ b/drivers/resctrl/mpam_internal.h
@@ -70,12 +70,17 @@ struct mpam_msc {
 
 	/*
 	 * mon_sel_lock protects access to the MSC hardware registers that are
-	 * affected by MPAMCFG_MON_SEL.
+	 * affected by MPAMCFG_MON_SEL, and the mbwu_state.
+	 * Access to mon_sel is needed from both process and interrupt contexts,
+	 * but is complicated by firmware-backed platforms that can't make any
+	 * access unless they can sleep.
+	 * Always use the mpam_mon_sel_lock() helpers.
+	 * Accesses to mon_sel need to be able to fail if they occur in the wrong
+	 * context.
 	 * If needed, take msc->probe_lock first.
 	 */
-	struct mutex outer_mon_sel_lock;
-	raw_spinlock_t inner_mon_sel_lock;
-	unsigned long inner_mon_sel_flags;
+	raw_spinlock_t _mon_sel_lock;
+	unsigned long _mon_sel_flags;
 
 	void __iomem *mapped_hwpage;
 	size_t mapped_hwpage_sz;
@@ -83,6 +88,30 @@ struct mpam_msc {
 	struct mpam_garbage garbage;
 };
 
+/* Returning false here means accesses to mon_sel must fail and report an error. */
+static inline bool __must_check mpam_mon_sel_lock(struct mpam_msc *msc)
+{
+	WARN_ON_ONCE(msc->iface != MPAM_IFACE_MMIO);
+
+	raw_spin_lock_irqsave(&msc->_mon_sel_lock, msc->_mon_sel_flags);
+	return true;
+}
+
+static inline void mpam_mon_sel_unlock(struct mpam_msc *msc)
+{
+	raw_spin_unlock_irqrestore(&msc->_mon_sel_lock, msc->_mon_sel_flags);
+}
+
+static inline void mpam_mon_sel_lock_held(struct mpam_msc *msc)
+{
+	lockdep_assert_held_once(&msc->_mon_sel_lock);
+}
+
+static inline void mpam_mon_sel_lock_init(struct mpam_msc *msc)
+{
+	raw_spin_lock_init(&msc->_mon_sel_lock);
+}
+
 struct mpam_class {
 	/* mpam_components in this class */
 	struct list_head components;
-- 
2.39.5
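
For illustration, a minimal caller sketch (not part of the patch): the
function name mpam_update_some_counter() and the -EIO error code are
made up, only the mpam_mon_sel_lock()/mpam_mon_sel_unlock() helpers and
their __must_check return come from the diff above.

static int mpam_update_some_counter(struct mpam_msc *msc)
{
	/* Fail cleanly if MON_SEL can't be accessed from this context. */
	if (!mpam_mon_sel_lock(msc))
		return -EIO;

	/* ... program MPAMCFG_MON_SEL and access the counter here ... */

	mpam_mon_sel_unlock(msc);
	return 0;
}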