mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
scsi: ufs: core: Fix data race in CPU latency PM QoS request handling
The cpu_latency_qos_add/remove/update_request interfaces lack internal
synchronization by design, requiring the caller to ensure thread safety.
The current implementation relies on the 'pm_qos_enabled' flag, which is
insufficient to prevent concurrent access and cannot serve as a proper
synchronization mechanism. This has led to data races and list
corruption issues.
A typical race condition call trace is:
[Thread A]
ufshcd_pm_qos_exit()
--> cpu_latency_qos_remove_request()
--> cpu_latency_qos_apply();
--> pm_qos_update_target()
--> plist_del <--(1) delete plist node
--> memset(req, 0, sizeof(*req));
--> hba->pm_qos_enabled = false;
[Thread B]
ufshcd_devfreq_target
--> ufshcd_devfreq_scale
--> ufshcd_scale_clks
--> ufshcd_pm_qos_update <--(2) pm_qos_enabled is true
--> cpu_latency_qos_update_request
--> pm_qos_update_target
--> plist_del <--(3) plist node use-after-free
Introduce a dedicated mutex to serialize PM QoS operations, preventing
data races and ensuring safe access to PM QoS resources, including sysfs
interface reads.
Fixes: 2777e73fc1 ("scsi: ufs: core: Add CPU latency QoS support for UFS driver")
Signed-off-by: Zhongqiu Han <zhongqiu.han@oss.qualcomm.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Huan Tang <tanghuan@vivo.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
committed by
Martin K. Petersen
parent
072fdd4b0b
commit
79dde5f7dc
@@ -512,6 +512,8 @@ static ssize_t pm_qos_enable_show(struct device *dev,
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	guard(mutex)(&hba->pm_qos_mutex);

	return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}
@@ -1045,6 +1045,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
 */
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
	guard(mutex)(&hba->pm_qos_mutex);

	if (hba->pm_qos_enabled)
		return;
@@ -1061,6 +1062,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba)
 */
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
	guard(mutex)(&hba->pm_qos_mutex);

	if (!hba->pm_qos_enabled)
		return;
@@ -1075,6 +1078,8 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
 */
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
	guard(mutex)(&hba->pm_qos_mutex);

	if (!hba->pm_qos_enabled)
		return;
@@ -10743,6 +10748,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
	mutex_init(&hba->ee_ctrl_mutex);

	mutex_init(&hba->wb_mutex);

	/* Initialize mutex for PM QoS request synchronization */
	mutex_init(&hba->pm_qos_mutex);

	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);
@@ -938,6 +938,7 @@ enum ufshcd_mcq_opr {
 * @ufs_rtc_update_work: A work for UFS RTC periodic update
 * @pm_qos_req: PM QoS request handle
 * @pm_qos_enabled: flag to check if pm qos is enabled
 * @pm_qos_mutex: synchronizes PM QoS request and status updates
 * @critical_health_count: count of critical health exceptions
 * @dev_lvl_exception_count: count of device level exceptions since last reset
 * @dev_lvl_exception_id: vendor specific information about the
@@ -1110,6 +1111,8 @@ struct ufs_hba {
	struct delayed_work ufs_rtc_update_work;
	struct pm_qos_request pm_qos_req;
	bool pm_qos_enabled;
	/* synchronizes PM QoS request and status updates */
	struct mutex pm_qos_mutex;

	int critical_health_count;
	atomic_t dev_lvl_exception_count;
Reference in New Issue
Block a user