forked from nuttx/nuttx-update
arch/all: fix occasional pthread_cancel deadlock in SMP (all architectures except arm64)

Please see the following pull request for more information: https://github.com/apache/nuttx/pull/9065

Signed-off-by: hujun5 <hujun5@xiaomi.com>
parent c60d59d825
commit 35b597ec2c
10 changed files with 147 additions and 0 deletions
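Every port changed here gains a per-CPU g_cpu_resumed spinlock. The CPU that handles a pause request takes it right after acknowledging the pause (releasing g_cpu_paused) and drops it only after it has restored its context; up_cpu_resume(), and the up_send_irqreq()/rp2040_send_irqreq() paths, now take and release that lock before returning. Previously up_cpu_resume() returned as soon as it released g_cpu_wait, so the requesting CPU could race ahead (for example continuing a pthread_cancel teardown or pausing the same CPU again) while the target was still inside its pause handler; the extra handshake appears to be what closes that window (see the linked PR for the full analysis). The sketch below is a minimal, host-runnable model of the handshake, not the NuttX sources: the two CPUs are pthreads, spinlock_t is a stand-in built on C11 atomic_flag, and the pause IPI is an atomic flag; only the g_cpu_wait/g_cpu_paused/g_cpu_resumed names and the lock ordering are taken from this diff.

/* Host-runnable model of the pause/resume handshake (build with -pthread).
 * NOT the NuttX code: spinlock_t here is a stand-in and the two "CPUs" are
 * threads; names mirror the kernel globals for readability.
 */

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef atomic_flag spinlock_t;

static void spin_lock(spinlock_t *l)
{
  while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
    {
    }
}

static void spin_unlock(spinlock_t *l)
{
  atomic_flag_clear_explicit(l, memory_order_release);
}

static spinlock_t g_cpu_wait    = ATOMIC_FLAG_INIT;
static spinlock_t g_cpu_paused  = ATOMIC_FLAG_INIT;
static spinlock_t g_cpu_resumed = ATOMIC_FLAG_INIT;  /* added by this commit */
static atomic_int g_pause_request;                   /* stand-in for the IPI */

/* Models up_cpu_paused(): runs on the CPU that received the pause request */

static void *target_cpu(void *arg)
{
  while (!atomic_load(&g_pause_request))     /* "wait for the pause IPI"     */
    {
    }

  spin_unlock(&g_cpu_paused);                /* acknowledge the request      */
  spin_lock(&g_cpu_resumed);                 /* NEW: "not yet resumed"       */
  spin_lock(&g_cpu_wait);                    /* sleep until resumed          */

  puts("target: restoring context");         /* arm_restorestate() et al.    */

  spin_unlock(&g_cpu_wait);
  spin_unlock(&g_cpu_resumed);               /* NEW: "fully resumed"         */
  return NULL;
}

/* Models up_cpu_pause() followed by up_cpu_resume() on the requesting CPU */

static void *requesting_cpu(void *arg)
{
  /* up_cpu_pause() */

  spin_lock(&g_cpu_wait);
  spin_lock(&g_cpu_paused);
  atomic_store(&g_pause_request, 1);         /* "send the pause IPI"         */
  spin_lock(&g_cpu_paused);                  /* wait for the acknowledgement */
  spin_unlock(&g_cpu_paused);

  puts("requester: target is paused");

  /* up_cpu_resume() */

  spin_unlock(&g_cpu_wait);                  /* let the target continue      */
  spin_lock(&g_cpu_resumed);                 /* NEW: do not return until it  */
  spin_unlock(&g_cpu_resumed);               /* has really resumed           */

  puts("requester: target has resumed");
  return NULL;
}

int main(void)
{
  pthread_t t1;
  pthread_t t2;

  pthread_create(&t1, NULL, target_cpu, NULL);
  pthread_create(&t2, NULL, requesting_cpu, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  return 0;
}

Reusing a spinlock for the acknowledgement mirrors how g_cpu_paused already implements the pause handshake; the cost is a brief busy-wait on the requesting CPU, which is acceptable because the window between releasing g_cpu_wait and the target finishing its restore is short.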
@@ -58,6 +58,7 @@
 
 static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -136,6 +137,10 @@ int up_cpu_paused(int cpu)
 
   spin_unlock(&g_cpu_paused[cpu]);
 
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   /* Wait for the spinlock to be released. The requesting CPU will release
    * the spinlock when the CPU is resumed.
    */
@@ -164,6 +169,7 @@ int up_cpu_paused(int cpu)
 
   arm_restorestate(tcb->xcp.regs);
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -320,6 +326,13 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return OK;
 }

----------------------------------------------------------------------
@@ -58,6 +58,7 @@
 
 static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -136,6 +137,10 @@ int up_cpu_paused(int cpu)
 
   spin_unlock(&g_cpu_paused[cpu]);
 
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   /* Wait for the spinlock to be released. The requesting CPU will release
    * the spinlock when the CPU is resumed.
    */
@@ -164,6 +169,7 @@ int up_cpu_paused(int cpu)
 
   arm_restorestate(tcb->xcp.regs);
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -320,6 +326,13 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return OK;
 }

----------------------------------------------------------------------
@@ -72,6 +72,7 @@
 
 static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 static volatile int g_irq_to_handle[CONFIG_SMP_NCPUS][2];
 
@@ -222,6 +223,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -246,6 +252,7 @@ int up_cpu_paused(int cpu)
 
   arm_restorestate(tcb->xcp.regs);
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -418,6 +425,12 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
   return OK;
 }
 
@@ -463,6 +476,12 @@ void up_send_irqreq(int idx, int irq, int cpu)
   /* Finally unlock the spinlock to proceed the handler */
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
 }
 
 #endif /* CONFIG_SMP */

----------------------------------------------------------------------
@@ -68,6 +68,7 @@
 
 volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -143,6 +144,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -169,6 +175,8 @@ int up_cpu_paused(int cpu)
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -343,5 +351,11 @@ int up_cpu_resume(int cpu)
 
   spin_unlock(&g_cpu_wait[cpu]);
 
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return 0;
 }

----------------------------------------------------------------------
@@ -70,6 +70,7 @@
 
 static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Name: rp2040_handle_irqreq
@@ -183,6 +184,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -207,6 +213,7 @@ int up_cpu_paused(int cpu)
 
   arm_restorestate(tcb->xcp.regs);
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -393,6 +400,12 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
   return 0;
 }
 
@@ -432,6 +445,12 @@ void rp2040_send_irqreq(int irqreq)
   /* Finally unlock the spinlock to proceed the handler */
 
   spin_unlock(&g_cpu_wait[0]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[0]);
+
+  spin_unlock(&g_cpu_resumed[0]);
 }
 
 #endif /* CONFIG_SMP */

----------------------------------------------------------------------
@@ -70,6 +70,7 @@
 
 static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -145,6 +146,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -169,6 +175,7 @@ int up_cpu_paused(int cpu)
 
   arm_restorestate(tcb->xcp.regs);
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -333,6 +340,13 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return OK;
 }

----------------------------------------------------------------------
@@ -58,6 +58,7 @@
 
 volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -133,6 +134,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -158,6 +164,7 @@ int up_cpu_paused(int cpu)
   riscv_restorestate(tcb->xcp.regs);
 
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -317,5 +324,11 @@ int up_cpu_resume(int cpu)
 
   spin_unlock(&g_cpu_wait[cpu]);
 
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return 0;
 }

----------------------------------------------------------------------
@@ -49,6 +49,7 @@
 
 static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Private Functions
@@ -176,6 +177,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -209,6 +215,7 @@ int up_cpu_paused(int cpu)
 
   sim_restorestate(tcb->xcp.regs);
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -394,5 +401,12 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return OK;
 }

----------------------------------------------------------------------
@@ -58,6 +58,7 @@
 
 volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -133,6 +134,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -158,6 +164,7 @@ int up_cpu_paused(int cpu)
   sparc_restorestate(tcb->xcp.regs);
 
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
 
   return OK;
 }
@@ -320,5 +327,11 @@ int up_cpu_resume(int cpu)
 
   spin_unlock(&g_cpu_wait[cpu]);
 
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return 0;
 }

----------------------------------------------------------------------
@@ -43,6 +43,7 @@
 
 static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
 static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions
@@ -118,6 +119,11 @@ int up_cpu_paused(int cpu)
   /* Wait for the spinlock to be released */
 
   spin_unlock(&g_cpu_paused[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
   spin_lock(&g_cpu_wait[cpu]);
 
   /* Restore the exception context of the tcb at the (new) head of the
@@ -143,6 +149,8 @@ int up_cpu_paused(int cpu)
   xtensa_restorestate(tcb->xcp.regs);
 
   spin_unlock(&g_cpu_wait[cpu]);
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return OK;
 }
@@ -307,6 +315,13 @@ int up_cpu_resume(int cpu)
                 !spin_islocked(&g_cpu_paused[cpu]));
 
   spin_unlock(&g_cpu_wait[cpu]);
+
+  /* Ensure the CPU has been resumed to avoid causing a deadlock */
+
+  spin_lock(&g_cpu_resumed[cpu]);
+
+  spin_unlock(&g_cpu_resumed[cpu]);
+
   return OK;
 }