author:    Michael Ellerman <mpe@ellerman.id.au>  2016-07-30 13:43:19 +1000
committer: Michael Ellerman <mpe@ellerman.id.au>  2016-07-30 13:43:19 +1000
commit:    719dbb2df78fc9a40e28392b07cd715bfc5a665c (patch)
tree:      0bbc3b84b74178d18164f5522ee3715623f94bbe /arch/powerpc/kernel
parent:    fbef66f0adcddf4475e19f3d09df22fb34e633f6 (diff)
parent:    9f595fd8b54809fed13fc30906ef1e90a3fcfbc9 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott: "Highlights include more 8xx optimizations, device tree updates, and MVME7100 support."
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     |  31
-rw-r--r--  arch/powerpc/kernel/entry_32.S        |  17
-rw-r--r--  arch/powerpc/kernel/entry_64.S        |   6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S  |   4
-rw-r--r--  arch/powerpc/kernel/head_8xx.S        | 159
-rw-r--r--  arch/powerpc/kernel/time.c            |  81
6 files changed, 183 insertions(+), 115 deletions(-)
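
The series consolidates the raw vtime bookkeeping into one structure that both
builds can share: 64-bit keeps it embedded in paca_struct, while 32-bit embeds
it in thread_info. The sketch below is reconstructed from the fields the diff
touches (user_time, system_time, starttime, starttime_user, startspurr,
utime_sspurr, user_time_scaled); the authoritative definition lives in
arch/powerpc/include/asm/accounting.h and may differ in ordering or comments.

    /* Sketch of the shared accounting state (reconstructed, not verbatim). */
    struct cpu_accounting_data {
            unsigned long user_time;          /* accumulated usermode TB ticks */
            unsigned long system_time;        /* accumulated system TB ticks */
            unsigned long user_time_scaled;   /* accumulated usermode SPURR ticks */
            unsigned long starttime;          /* TB value when current interval began */
            unsigned long starttime_user;     /* TB value on last exit to user mode */
            unsigned long startspurr;         /* SPURR value snapshot */
            unsigned long utime_sspurr;       /* user_time when startspurr was set */
    };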
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 5b99f956e32f..b89d14c0352c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -68,6 +68,10 @@
#include "../mm/mmu_decl.h"
#endif
+#ifdef CONFIG_PPC_8xx
+#include <asm/fixmap.h>
+#endif
+
int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -240,13 +244,28 @@ int main(void)
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default));
- DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
- DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
- DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
- DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+ DEFINE(ACCOUNT_STARTTIME,
+ offsetof(struct paca_struct, accounting.starttime));
+ DEFINE(ACCOUNT_STARTTIME_USER,
+ offsetof(struct paca_struct, accounting.starttime_user));
+ DEFINE(ACCOUNT_USER_TIME,
+ offsetof(struct paca_struct, accounting.user_time));
+ DEFINE(ACCOUNT_SYSTEM_TIME,
+ offsetof(struct paca_struct, accounting.system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
+#else /* CONFIG_PPC64 */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ DEFINE(ACCOUNT_STARTTIME,
+ offsetof(struct thread_info, accounting.starttime));
+ DEFINE(ACCOUNT_STARTTIME_USER,
+ offsetof(struct thread_info, accounting.starttime_user));
+ DEFINE(ACCOUNT_USER_TIME,
+ offsetof(struct thread_info, accounting.user_time));
+ DEFINE(ACCOUNT_SYSTEM_TIME,
+ offsetof(struct thread_info, accounting.system_time));
+#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -734,5 +753,9 @@ int main(void)
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+#ifdef CONFIG_PPC_8xx
+ DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
+#endif
+
return 0;
}
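
For readers unfamiliar with asm-offsets.c: the DEFINE() entries are not run at
boot. The file is compiled to assembly, the build scripts extract the marker
lines, and the result becomes include/generated/asm-offsets.h, so hand-written
assembly (entry_32.S below, for instance) can address C structure members by
name. A minimal sketch of the mechanism, following include/linux/kbuild.h of
this era:

    /* Emit a marker line the build turns into "#define sym <val>". */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* After the build step, assembly can write e.g.
     *         lwz  r10, ACCOUNT_USER_TIME(r9)
     * where ACCOUNT_USER_TIME expands to
     * offsetof(struct thread_info, accounting.user_time) on 32-bit. */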
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2405631e91a2..9899032230b4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -175,6 +175,12 @@ transfer_to_handler:
addi r12,r12,-1
stw r12,4(r11)
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ CURRENT_THREAD_INFO(r9, r1)
+ tophys(r9, r9)
+ ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
+#endif
+
b 3f
2: /* if from kernel, check interrupted DOZE/NAP mode and
@@ -398,6 +404,13 @@ BEGIN_FTR_SECTION
lwarx r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ andi. r4,r8,MSR_PR
+ beq 3f
+ CURRENT_THREAD_INFO(r4, r1)
+ ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
+3:
+#endif
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@@ -769,6 +782,10 @@ restore_user:
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ CURRENT_THREAD_INFO(r9, r1)
+ ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
+#endif
b restore
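
ACCOUNT_CPU_USER_ENTRY/EXIT are assembly macros, but their effect is simple to
state in C. A simplified sketch (function names hypothetical; the scaled-time
and SPURR handling is omitted):

    /* On entry from user mode: close the user-time interval. */
    static void account_cpu_user_entry(struct cpu_accounting_data *acct)
    {
            unsigned long now = mftb();        /* read the timebase */
            acct->user_time += now - acct->starttime_user;
            acct->starttime = now;             /* system interval begins */
    }

    /* On exit to user mode: close the system-time interval. */
    static void account_cpu_user_exit(struct cpu_accounting_data *acct)
    {
            unsigned long now = mftb();
            acct->system_time += now - acct->starttime;
            acct->starttime_user = now;        /* user interval begins */
    }

Note the tophys() in transfer_to_handler: at that point in the 32-bit entry
path the MMU may not be translating yet, so the thread_info pointer is
converted to a physical address before the macro dereferences it.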
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2e0c565754aa..fcb2887f5a33 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -72,7 +72,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
std r0,GPR0(r1)
std r10,GPR1(r1)
beq 2f /* if from kernel mode */
- ACCOUNT_CPU_USER_ENTRY(r10, r11)
+ ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2: std r2,GPR2(r1)
std r3,GPR3(r1)
mfcr r2
@@ -246,7 +246,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
ld r4,_LINK(r1)
beq- 1f
- ACCOUNT_CPU_USER_EXIT(r11, r12)
+ ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
@@ -859,7 +859,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
mtspr SPRN_PPR,r2 /* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ACCOUNT_CPU_USER_EXIT(r2, r4)
+ ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
1:
mtspr SPRN_SRR1,r3
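
The change in this file is purely mechanical: the macros now take the base
register as an explicit first argument. On 64-bit that is always r13 (the
PACA), so behaviour is unchanged; the point is that entry_32.S can pass a
thread_info pointer into the very same macros. An illustrative C analogue of
the refactoring (names hypothetical):

    /* Before: the per-CPU base was implied, so only 64-bit could use it. */
    static void account_entry_old(void)
    {
            struct cpu_accounting_data *acct = &get_paca()->accounting;
            /* ... */
    }

    /* After: the caller names the base, so both builds share the code. */
    static void account_entry_new(struct cpu_accounting_data *acct)
    {
            /* 64-bit passes &get_paca()->accounting,
             * 32-bit passes &current_thread_info()->accounting. */
    }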
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 2d3b40fd9bac..38a1f96430e1 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -386,7 +386,7 @@ exc_##n##_common: \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \
+ ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \
2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
ld r4,excf+EX_R11(r13); /* get back r11 */ \
mfspr r5,scratch; /* get back r13 */ \
@@ -1059,7 +1059,7 @@ fast_exception_return:
andi. r6,r10,MSR_PR
REST_2GPRS(6, r1)
beq 1f
- ACCOUNT_CPU_USER_EXIT(r10, r11)
+ ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
ld r0,GPR13(r1)
1: stdcx. r0,0,r1 /* to clear the reservation */
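
The first hunk here sits inside a generated macro: a macro in this file stamps
out one exc_##n##_common body per exception number, which is why every line
carries a trailing backslash. The same token-pasting idea in C, for
illustration only (hypothetical macro name):

    #define EXC_COMMON(n)                                   \
            void exc_##n##_common(void)                     \
            {                                               \
                    /* save state, account, dispatch */     \
            }

    EXC_COMMON(0x100)   /* expands to exc_0x100_common() */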
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 80c69472314e..43ddaae42baf 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -30,6 +30,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/fixmap.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
@@ -383,28 +384,57 @@ InstructionTLBMiss:
EXCEPTION_EPILOG_0
rfi
+/*
+ * Bottom part of the DataStoreTLBMiss handler for the IMMR area,
+ * placed out of line because there is not enough room for it in
+ * the DataStoreTLBMiss exception slot.
+ */
+DTLBMissIMMR:
+ mtcr r10
+ /* Set 512k byte guarded page and mark it valid */
+ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_IMMR /* Get current IMMR */
+ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+ _PAGE_PRESENT | _PAGE_NO_CACHE
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ EXCEPTION_EPILOG_0
+ rfi
+
. = 0x1200
DataStoreTLBMiss:
- mtspr SPRN_SPRG_SCRATCH2, r3
EXCEPTION_PROLOG_0
- mfcr r3
+ mfcr r10
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- mfspr r10, SPRN_MD_EPN
- IS_KERNEL(r11, r10)
+ mfspr r11, SPRN_MD_EPN
+ rlwinm r11, r11, 16, 0xfff8
+#ifndef CONFIG_PIN_TLB_IMMR
+ cmpli cr0, r11, VIRT_IMMR_BASE@h
+#endif
+ cmpli cr7, r11, PAGE_OFFSET@h
+#ifndef CONFIG_PIN_TLB_IMMR
+_ENTRY(DTLBMiss_jmp)
+ beq- DTLBMissIMMR
+#endif
+ bge- cr7, 4f
+
mfspr r11, SPRN_M_TW /* Get level 1 table */
- BRANCH_UNLESS_KERNEL(3f)
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
+ mtcr r10
+#ifdef CONFIG_8xx_CPU6
+ mtspr SPRN_SPRG_SCRATCH2, r3
+#endif
+ mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
- mtcr r11
- bt- 28,DTLBMiss8M /* bit 28 = Large page (8M) */
- mtcr r3
/* We have a pte table, so fetch the pte from the table.
*/
@@ -452,29 +482,30 @@ DataStoreTLBMiss:
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
+#ifdef CONFIG_8xx_CPU6
mfspr r3, SPRN_SPRG_SCRATCH2
+#endif
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
-DTLBMiss8M:
- mtcr r3
- ori r11, r11, MD_SVALID
- MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
-#ifdef CONFIG_PPC_16K_PAGES
- /*
- * In 16k pages mode, each PGD entry defines a 64M block.
- * Here we select the 8M page within the block.
- */
- rlwimi r11, r10, 0, 0x03800000
-#endif
- rlwinm r10, r11, 0, 0xff800000
+4:
+_ENTRY(DTLBMiss_cmp)
+ cmpli cr0, r11, (PAGE_OFFSET + 0x1800000)@h
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ bge- 3b
+
+ mtcr r10
+ /* Set 8M byte page and mark it valid */
+ li r10, MD_PS8MEG | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_MD_EPN
+ rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256MB RAM */
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
_PAGE_PRESENT
- MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
li r11, RPN_PATTERN
- mfspr r3, SPRN_SPRG_SCRATCH2
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
@@ -553,12 +584,14 @@ FixupDAR:/* Entry point for dcbx workaround. */
IS_KERNEL(r11, r10)
mfspr r11, SPRN_M_TW /* Get level 1 table */
BRANCH_UNLESS_KERNEL(3f)
+ rlwinm r11, r10, 16, 0xfff8
+_ENTRY(FixupDAR_cmp)
+ cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+ blt- cr7, 200f
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
- mtcr r11
- bt 28,200f /* bit 28 = Large page (8M) */
rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Insert level 2 index */
rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -584,8 +617,8 @@ FixupDAR:/* Entry point for dcbx workaround. */
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
- /* concat physical page address(r11) and page offset(r10) */
-200: rlwimi r11, r10, 0, 32 - (PAGE_SHIFT << 1), 31
+ /* create physical page address from effective address */
+200: tophys(r11, r10)
b 201b
144: mfspr r10, SPRN_DSISR
@@ -763,10 +796,18 @@ start_here:
* virtual to physical. Also, set the cache mode since that is defined
* by TLB entries and perform any additional mapping (such as the IMMR).
* If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
- * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
+ * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
* these mappings is mapped by page tables.
*/
initial_mmu:
+ li r8, 0
+ mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
+ lis r10, MD_RESETVAL@h
+#ifndef CONFIG_8xx_COPYBACK
+ oris r10, r10, MD_WTDEF@h
+#endif
+ mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
+
tlbia /* Invalidate all TLB entries */
/* Always pin the first 8 MB ITLB to prevent ITLB
misses while mucking around with SRR0/SRR1 in asm
@@ -777,34 +818,20 @@ initial_mmu:
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
#ifdef CONFIG_PIN_TLB
- lis r10, (MD_RSV4I | MD_RESETVAL)@h
- ori r10, r10, 0x1c00
- mr r8, r10
-#else
- lis r10, MD_RESETVAL@h
-#endif
-#ifndef CONFIG_8xx_COPYBACK
- oris r10, r10, MD_WTDEF@h
-#endif
+ oris r10, r10, MD_RSV4I@h
mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
+#endif
- /* Now map the lower 8 Meg into the TLBs. For this quick hack,
- * we can load the instruction and data TLB registers with the
- * same values.
- */
+ /* Now map the lower 8 Meg into the ITLB. */
lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r8
- mtspr SPRN_MD_EPN, r8
li r8, MI_PS8MEG | (2 << 5) /* Set 8M byte page, APG 2 */
ori r8, r8, MI_SVALID /* Make it valid */
mtspr SPRN_MI_TWC, r8
- li r8, MI_PS8MEG /* Set 8M byte page, APG 0 */
- ori r8, r8, MI_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
- mtspr SPRN_MD_RPN, r8
+
lis r8, MI_APG_INIT@h /* Set protection modes */
ori r8, r8, MI_APG_INIT@l
mtspr SPRN_MI_AP, r8
@@ -812,51 +839,25 @@ initial_mmu:
ori r8, r8, MD_APG_INIT@l
mtspr SPRN_MD_AP, r8
- /* Map another 8 MByte at the IMMR to get the processor
+ /* Map a 512k page for the IMMR to get the processor
* internal registers (among other things).
*/
-#ifdef CONFIG_PIN_TLB
- addi r10, r10, 0x0100
+#ifdef CONFIG_PIN_TLB_IMMR
+ ori r10, r10, 0x1c00
mtspr SPRN_MD_CTR, r10
-#endif
+
mfspr r9, 638 /* Get current IMMR */
- andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
+ andis. r9, r9, 0xfff8 /* Get 512 kbytes boundary */
- mr r8, r9 /* Create vaddr for TLB */
+ lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */
ori r8, r8, MD_EVALID /* Mark it valid */
mtspr SPRN_MD_EPN, r8
- li r8, MD_PS8MEG /* Set 8M byte page */
+ li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
ori r8, r8, MD_SVALID /* Make it valid */
mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
mtspr SPRN_MD_RPN, r8
-
-#ifdef CONFIG_PIN_TLB
- /* Map two more 8M kernel data pages.
- */
- addi r10, r10, 0x0100
- mtspr SPRN_MD_CTR, r10
-
- lis r8, KERNELBASE@h /* Create vaddr for TLB */
- addis r8, r8, 0x0080 /* Add 8M */
- ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr SPRN_MD_EPN, r8
- li r9, MI_PS8MEG /* Set 8M byte page */
- ori r9, r9, MI_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r9
- li r11, MI_BOOTINIT /* Create RPN for address 0 */
- addis r11, r11, 0x0080 /* Add 8M */
- mtspr SPRN_MD_RPN, r11
-
- addi r10, r10, 0x0100
- mtspr SPRN_MD_CTR, r10
-
- addis r8, r8, 0x0080 /* Add 8M */
- mtspr SPRN_MD_EPN, r8
- mtspr SPRN_MD_TWC, r9
- addis r11, r11, 0x0080 /* Add 8M */
- mtspr SPRN_MD_RPN, r11
#endif
/* Since the cache is enabled according to the information we
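
VIRT_IMMR_BASE, exported through asm-offsets.c above and tested in the DTLB
miss handler, is the fixed virtual address at which the 512k IMMR block is now
mapped. It comes from the generic fixmap scheme, which hands out virtual
addresses downward from FIXADDR_TOP (sketch of the standard definition from
include/asm-generic/fixmap.h):

    /* Fixmap slot index -> virtual address, in page-sized steps. */
    #define __fix_to_virt(x)        (FIXADDR_TOP - ((x) << PAGE_SHIFT))

Because the address is a build-time constant, the miss handler can classify an
IMMR access with a single cmpli against VIRT_IMMR_BASE@h instead of walking
the page tables.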
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6b4d01d1ccf0..4e7759c8ca30 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -167,7 +167,15 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
+#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
+#endif
+
+#ifdef CONFIG_PPC64
+#define get_accounting(tsk) (&get_paca()->accounting)
+#else
+#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+#endif
static void calc_cputime_factors(void)
{
@@ -187,7 +195,7 @@ static void calc_cputime_factors(void)
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
-static u64 read_spurr(u64 tb)
+static unsigned long read_spurr(unsigned long tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
@@ -250,8 +258,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
void accumulate_stolen_time(void)
{
u64 sst, ust;
-
u8 save_soft_enabled = local_paca->soft_enabled;
+ struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before
* soft/hard_enabled are sync'ed to the expected state
@@ -261,10 +269,10 @@ void accumulate_stolen_time(void)
*/
local_paca->soft_enabled = 0;
- sst = scan_dispatch_log(local_paca->starttime_user);
- ust = scan_dispatch_log(local_paca->starttime);
- local_paca->system_time -= sst;
- local_paca->user_time -= ust;
+ sst = scan_dispatch_log(acct->starttime_user);
+ ust = scan_dispatch_log(acct->starttime);
+ acct->system_time -= sst;
+ acct->user_time -= ust;
local_paca->stolen_time += ust + sst;
local_paca->soft_enabled = save_soft_enabled;
@@ -276,7 +284,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
stolen = scan_dispatch_log(stop_tb);
- get_paca()->system_time -= stolen;
+ get_paca()->accounting.system_time -= stolen;
}
stolen += get_paca()->stolen_time;
@@ -296,27 +304,29 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
* Account time for a transition between system, hard irq
* or soft irq state.
*/
-static u64 vtime_delta(struct task_struct *tsk,
- u64 *sys_scaled, u64 *stolen)
+static unsigned long vtime_delta(struct task_struct *tsk,
+ unsigned long *sys_scaled,
+ unsigned long *stolen)
{
- u64 now, nowscaled, deltascaled;
- u64 udelta, delta, user_scaled;
+ unsigned long now, nowscaled, deltascaled;
+ unsigned long udelta, delta, user_scaled;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
WARN_ON_ONCE(!irqs_disabled());
now = mftb();
nowscaled = read_spurr(now);
- get_paca()->system_time += now - get_paca()->starttime;
- get_paca()->starttime = now;
- deltascaled = nowscaled - get_paca()->startspurr;
- get_paca()->startspurr = nowscaled;
+ acct->system_time += now - acct->starttime;
+ acct->starttime = now;
+ deltascaled = nowscaled - acct->startspurr;
+ acct->startspurr = nowscaled;
*stolen = calculate_stolen_time(now);
- delta = get_paca()->system_time;
- get_paca()->system_time = 0;
- udelta = get_paca()->user_time - get_paca()->utime_sspurr;
- get_paca()->utime_sspurr = get_paca()->user_time;
+ delta = acct->system_time;
+ acct->system_time = 0;
+ udelta = acct->user_time - acct->utime_sspurr;
+ acct->utime_sspurr = acct->user_time;
/*
* Because we don't read the SPURR on every kernel entry/exit,
@@ -338,14 +348,14 @@ static u64 vtime_delta(struct task_struct *tsk,
*sys_scaled = deltascaled;
}
}
- get_paca()->user_time_scaled += user_scaled;
+ acct->user_time_scaled += user_scaled;
return delta;
}
void vtime_account_system(struct task_struct *tsk)
{
- u64 delta, sys_scaled, stolen;
+ unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
account_system_time(tsk, 0, delta, sys_scaled);
@@ -356,7 +366,7 @@ EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
- u64 delta, sys_scaled, stolen;
+ unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
account_idle_time(delta + stolen);
@@ -374,15 +384,32 @@ void vtime_account_idle(struct task_struct *tsk)
void vtime_account_user(struct task_struct *tsk)
{
cputime_t utime, utimescaled;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
- utime = get_paca()->user_time;
- utimescaled = get_paca()->user_time_scaled;
- get_paca()->user_time = 0;
- get_paca()->user_time_scaled = 0;
- get_paca()->utime_sspurr = 0;
+ utime = acct->user_time;
+ utimescaled = acct->user_time_scaled;
+ acct->user_time = 0;
+ acct->user_time_scaled = 0;
+ acct->utime_sspurr = 0;
account_user_time(tsk, utime, utimescaled);
}
+#ifdef CONFIG_PPC32
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+ struct cpu_accounting_data *acct = get_accounting(current);
+
+ acct->starttime = get_accounting(prev)->starttime;
+ acct->system_time = 0;
+ acct->user_time = 0;
+}
+#endif /* CONFIG_PPC32 */
+
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
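
The new arch_vtime_task_switch() is the 32-bit half of a generic hook: with
CONFIG_VIRT_CPU_ACCOUNTING_NATIVE the scheduler settles the outgoing task's
time at every context switch and then lets the architecture rebase its
counters. Simplified from the generic code of this era
(kernel/sched/cputime.c), the calling sequence is approximately:

    void vtime_common_task_switch(struct task_struct *prev)
    {
            if (is_idle_task(prev))
                    vtime_account_idle(prev);
            else
                    vtime_account_system(prev);

            vtime_account_user(prev);      /* flush accumulated user time */
            arch_vtime_task_switch(prev);  /* the PPC32 hook added above */
    }

The PPC32 variant only has to carry starttime over and zero the accumulators;
the per-exception deltas themselves are gathered by the ACCOUNT_CPU_USER_*
macros patched into entry_32.S above.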