| author | Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> | 2006-09-26 15:27:56 -0700 |
| committer | Tony Luck <tony.luck@intel.com> | 2006-09-26 15:27:56 -0700 |
| commit | ddb4f0df0424d174567a011a176782ffa4202071 (patch) | |
| tree | 81eeb2f18b3a4a295c87673493c2b113c6cf9393 /arch/ia64 | |
| parent | b29e7132b5a9f2496beed37beef7ba4d010afb2c (diff) | |
[IA64] CMC/CPE: Reverse the order of fetching log and checking poll threshold
This patch reverses the order of fetching the log from SAL and
checking the poll threshold. This fixes the following issues:
- If SAL_GET_STATE_INFO is extremely slow (because of a huge system
  or simply a poor implementation) and takes more than 1/5 sec,
  CMCI/CPEI will never switch over to CMCP/CPEP.
- Under a severe interrupt flood (continuous corrected errors pull
  all CPUs into the handler at once and keep them there), the CPUs
  are serialized by IA64_LOG_LOCK(*).
Today the poll threshold is checked after taking the lock and
fetching the log, so SAL_GET_STATE_INFO has to be called
(num_online_cpus() + 4) times in the worst case.
If the threshold is checked before the lock, the interrupts can be
shut off quickly without waiting for the preceding log fetches, and
the number of calls drops to num_online_cpus() in the same situation.
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/kernel/mca.c | 18 |
1 file changed, 10 insertions, 8 deletions
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 98f3b26d7aff..bfbd8986153b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -511,9 +511,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CPE error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-
 	spin_lock(&cpe_history_lock);
 	if (!cpe_poll_enabled && cpe_vector >= 0) {
 
@@ -542,7 +539,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cpe_history[index++] = now;
 			if (index == CPE_HISTORY_LENGTH)
@@ -550,6 +547,10 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cpe_history_lock);
+out:
+	/* Get the CPE error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+
 	return IRQ_HANDLED;
 }
 
@@ -1278,9 +1279,6 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CMC error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-
 	spin_lock(&cmc_history_lock);
 	if (!cmc_polling_enabled) {
 		int i, count = 1; /* we know 1 happened now */
@@ -1313,7 +1311,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cmc_history[index++] = now;
 			if (index == CMC_HISTORY_LENGTH)
@@ -1321,6 +1319,10 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cmc_history_lock);
+out:
+	/* Get the CMC error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+
 	return IRQ_HANDLED;
 }
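
To make the reordering described in the commit message easier to follow, here is a
minimal sketch of the control flow ia64_mca_cpe_int_handler() ends up with after this
patch (the CMC handler is reshaped identically). It is not the kernel source verbatim:
flood_detected(), switch_to_polling() and record_interrupt_time() are hypothetical
helpers standing in for the inline cpe_history[]/mod_timer() logic shown in the diff
above; the point is that the cheap threshold check now runs before the potentially
slow SAL_GET_STATE_INFO fetch.

/* Simplified sketch, not kernel source: flood_detected(), switch_to_polling()
 * and record_interrupt_time() are hypothetical stand-ins for the inline
 * cpe_history[]/mod_timer() logic in the diff above. */
static irqreturn_t
ia64_mca_cpe_int_handler_sketch (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {
		if (flood_detected()) {
			/* Too many CPEs in a short window: mask the vector,
			 * arm the poll timer, and drop the lock *before* the
			 * potentially slow SAL call. */
			switch_to_polling();
			spin_unlock(&cpe_history_lock);
			goto out;
		}
		record_interrupt_time();
	}
	spin_unlock(&cpe_history_lock);
out:
	/* The SAL_GET_STATE_INFO fetch now happens last, so a CPU can check
	 * the flood threshold without first waiting behind IA64_LOG_LOCK
	 * for the preceding fetches. */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
	return IRQ_HANDLED;
}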