mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
ATL2 hardware was missing descriptor cache invalidation in hw_stop(), causing SMMU translation faults during device shutdown and module removal: [ 70.355743] arm-smmu-v3 arm-smmu-v3.5.auto: event 0x10 received: [ 70.361893] arm-smmu-v3 arm-smmu-v3.5.auto: 0x0002060000000010 [ 70.367948] arm-smmu-v3 arm-smmu-v3.5.auto: 0x0000020000000000 [ 70.374002] arm-smmu-v3 arm-smmu-v3.5.auto: 0x00000000ff9bc000 [ 70.380055] arm-smmu-v3 arm-smmu-v3.5.auto: 0x0000000000000000 [ 70.386109] arm-smmu-v3 arm-smmu-v3.5.auto: event: F_TRANSLATION client: 0001:06:00.0 sid: 0x20600 ssid: 0x0 iova: 0xff9bc000 ipa: 0x0 [ 70.398531] arm-smmu-v3 arm-smmu-v3.5.auto: unpriv data write s1 "Input address caused fault" stag: 0x0 Commit 7a1bb49461 ("net: aquantia: fix potential IOMMU fault after driver unbind") and commit ed4d81c4b3 ("net: aquantia: when cleaning hw cache it should be toggled") fixed cache invalidation for ATL B0, but ATL2 was left with only interrupt disabling. This allowed hardware to write to cached descriptors after DMA memory was unmapped, triggering SMMU faults. Once cache invalidation is applied to ATL2, the translation fault can't be observed anymore. Add shared aq_hw_invalidate_descriptor_cache() helper and use it in both ATL B0 and ATL2 hw_stop() implementations for consistent behavior. Fixes: e54dcf4bba ("net: atlantic: basic A2 init/deinit hw_ops") Tested-by: Carol Soto <csoto@nvidia.com> Signed-off-by: Kai-Heng Feng <kaihengf@nvidia.com> Reviewed-by: Simon Horman <horms@kernel.org> Link: https://patch.msgid.link/20251120041537.62184-1-kaihengf@nvidia.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
148 lines
3.0 KiB
C
148 lines
3.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/* Atlantic Network Driver
|
|
*
|
|
* Copyright (C) 2014-2019 aQuantia Corporation
|
|
* Copyright (C) 2019-2020 Marvell International Ltd.
|
|
*/
|
|
|
|
/* File aq_hw_utils.c: Definitions of helper functions used across
|
|
* hardware layer.
|
|
*/
|
|
|
|
#include "aq_hw_utils.h"
|
|
|
|
#include <linux/io-64-nonatomic-lo-hi.h>
|
|
|
|
#include "aq_hw.h"
|
|
#include "aq_nic.h"
|
|
#include "hw_atl/hw_atl_llh.h"
|
|
|
|
/* Read-modify-write helper: update the register field selected by @msk
 * (mask already positioned in the register) with @val << @shift.
 * A full-word mask degenerates into a plain write, and an unchanged
 * value skips the write to avoid a redundant MMIO access.
 */
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			 u32 shift, u32 val)
{
	u32 old, new;

	if (msk == ~0U) {
		/* Whole register is replaced - no readback needed. */
		aq_hw_write_reg(aq_hw, addr, val);
		return;
	}

	old = aq_hw_read_reg(aq_hw, addr);
	new = (old & ~msk) | (val << shift);

	if (new != old)
		aq_hw_write_reg(aq_hw, addr, new);
}
|
|
|
|
/* Extract the field selected by @msk from register @addr, shifted down
 * by @shift so the caller receives the raw field value.
 */
u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
{
	u32 reg = aq_hw_read_reg(aq_hw, addr);

	return (reg & msk) >> shift;
}
|
|
|
|
/* Read a 32-bit register at offset @reg from the MMIO base.
 *
 * An all-ones readback may indicate the device dropped off the bus
 * (surprise removal); that is confirmed against the chip's alive-check
 * register before latching AQ_HW_FLAG_ERR_UNPLUG.
 */
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
	u32 val = readl(hw->mmio + reg);

	if (val == U32_MAX) {
		u32 alive = hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr;

		if (readl(hw->mmio + alive) == U32_MAX)
			aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
	}

	return val;
}
|
|
|
|
/* Write a 32-bit register at offset @reg from the MMIO base. */
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
{
	void __iomem *base = hw->mmio;

	writel(value, base + reg);
}
|
|
|
|
/* Most 64-bit registers are exposed as an LSW, MSW pair.
 * Counters are normally implemented by HW as latched pairs:
 * reading LSW first locks MSW, to overcome LSW overflow.
 */
|
|
/* Read a 64-bit register: one native 64-bit load when the hardware
 * supports it (op64bit), otherwise a lo-then-hi pair of 32-bit loads.
 * All-ones data triggers the same unplug detection as aq_hw_read_reg().
 */
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
{
	u64 val;

	if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
		val = readq(hw->mmio + reg);
	else
		val = lo_hi_readq(hw->mmio + reg);

	if (val == U64_MAX) {
		u32 alive = hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr;

		if (readl(hw->mmio + alive) == U32_MAX)
			aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
	}

	return val;
}
|
|
|
|
/* Write a 64-bit register: one native store when supported (op64bit),
 * otherwise two 32-bit stores in low-then-high order.
 */
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
{
	if (!hw->aq_nic_cfg->aq_hw_caps->op64bit)
		lo_hi_writeq(value, hw->mmio + reg);
	else
		writeq(value, hw->mmio + reg);
}
|
|
|
|
int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
|
|
{
|
|
int err;
|
|
u32 val;
|
|
|
|
/* Invalidate Descriptor Cache to prevent writing to the cached
|
|
* descriptors and to the data pointer of those descriptors
|
|
*/
|
|
hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
|
|
|
|
err = aq_hw_err_from_flags(hw);
|
|
if (err)
|
|
goto err_exit;
|
|
|
|
readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
|
|
hw, val, val == 1, 1000U, 10000U);
|
|
|
|
err_exit:
|
|
return err;
|
|
}
|
|
|
|
int aq_hw_err_from_flags(struct aq_hw_s *hw)
|
|
{
|
|
int err = 0;
|
|
|
|
if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
|
|
err = -ENXIO;
|
|
goto err_exit;
|
|
}
|
|
if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
|
|
err = -EIO;
|
|
goto err_exit;
|
|
}
|
|
|
|
err_exit:
|
|
return err;
|
|
}
|
|
|
|
int aq_hw_num_tcs(struct aq_hw_s *hw)
|
|
{
|
|
switch (hw->aq_nic_cfg->tc_mode) {
|
|
case AQ_TC_MODE_8TCS:
|
|
return 8;
|
|
case AQ_TC_MODE_4TCS:
|
|
return 4;
|
|
default:
|
|
break;
|
|
}
|
|
|
|
return 1;
|
|
}
|
|
|
|
int aq_hw_q_per_tc(struct aq_hw_s *hw)
|
|
{
|
|
switch (hw->aq_nic_cfg->tc_mode) {
|
|
case AQ_TC_MODE_8TCS:
|
|
return 4;
|
|
case AQ_TC_MODE_4TCS:
|
|
return 8;
|
|
default:
|
|
return 4;
|
|
}
|
|
}
|