Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c               2
-rw-r--r--  kernel/cpuset.c             13
-rw-r--r--  kernel/exit.c                8
-rw-r--r--  kernel/fork.c                5
-rw-r--r--  kernel/kfifo.c               4
-rw-r--r--  kernel/params.c             10
-rw-r--r--  kernel/posix-cpu-timers.c   31
-rw-r--r--  kernel/power/Kconfig         2
-rw-r--r--  kernel/power/disk.c          6
-rw-r--r--  kernel/power/power.h         7
-rw-r--r--  kernel/power/swsusp.c       36
-rw-r--r--  kernel/printk.c              7
-rw-r--r--  kernel/rcupdate.c           13
-rw-r--r--  kernel/signal.c             73
-rw-r--r--  kernel/sys.c                55
-rw-r--r--  kernel/time.c                1
16 files changed, 186 insertions, 87 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 83096b67510a..aefa73a8a586 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -560,7 +560,7 @@ static void audit_buffer_free(struct audit_buffer *ab)
}
static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
- unsigned int __nocast gfp_mask, int type)
+ gfp_t gfp_mask, int type)
{
unsigned long flags;
struct audit_buffer *ab = NULL;
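
The __nocast-to-gfp_t conversions in this hunk (and the matching ones in cpuset.c, kfifo.c, and signal.c below) are part of the tree-wide gfp_t annotation work. A comment-only sketch of the idea, assuming the linux/types.h definition of this era:

/*
 * Sketch, not the actual header:
 *
 *	typedef unsigned int __nocast gfp_t;
 *
 * sparse treats gfp_t as a distinct type, so passing an allocation
 * mask where a size is expected (or vice versa) is reported at check
 * time; plain gcc still sees an ordinary unsigned int, so generated
 * code is unchanged.
 */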
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 79866bc6b3a1..28176d083f7b 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -968,8 +968,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
char *page;
ssize_t retval = 0;
char *s;
- char *start;
- size_t n;
if (!(page = (char *)__get_free_page(GFP_KERNEL)))
return -ENOMEM;
@@ -999,14 +997,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
*s++ = '\n';
*s = '\0';
- /* Do nothing if *ppos is at the eof or beyond the eof. */
- if (s - page <= *ppos)
- return 0;
-
- start = page + *ppos;
- n = s - start;
- retval = n - copy_to_user(buf, start, min(n, nbytes));
- *ppos += retval;
+ retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
free_page((unsigned long)page);
return retval;
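
Worth noting: the removed open-coded version returned 0 at EOF without reaching the free_page() at out:, leaking the page on every read past the end. simple_read_from_buffer() does the same *ppos/copy_to_user bookkeeping inside the normal cleanup path. A hedged sketch of the helper's behavior (the real one lives in fs/libfs.c):

#include <linux/types.h>
#include <asm/uaccess.h>

/* Sketch of what the helper does, not the fs/libfs.c source. */
static ssize_t read_from_buffer_sketch(char __user *to, size_t count,
				       loff_t *ppos, const char *from,
				       size_t available)
{
	loff_t pos = *ppos;

	if (pos >= available)		/* at or past EOF */
		return 0;
	if (count > available - pos)
		count = available - pos;
	if (copy_to_user(to, from + pos, count))
		return -EFAULT;
	*ppos = pos + count;
	return count;
}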
@@ -1679,7 +1670,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
 * GFP_USER - only nodes in current task's mems_allowed ok.
**/
-int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
+int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
diff --git a/kernel/exit.c b/kernel/exit.c
index 6d2089a1bce7..43077732619b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -371,6 +371,12 @@ static inline void close_files(struct files_struct * files)
struct fdtable *fdt;
j = 0;
+
+ /*
+ * It is safe to dereference the fd table without RCU or
+ * ->file_lock because this is the last reference to the
+ * files structure.
+ */
fdt = files_fdtable(files);
for (;;) {
unsigned long set;
@@ -1197,7 +1203,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
exit_code = p->exit_code;
if (unlikely(!exit_code) ||
- unlikely(p->state > TASK_STOPPED))
+ unlikely(p->state & TASK_TRACED))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
why, (exit_code << 8) | 0x7f,
diff --git a/kernel/fork.c b/kernel/fork.c
index 8149f3602881..280bd44ac441 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -848,7 +848,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
- new_flags &= ~PF_SUPERPRIV;
+ new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
new_flags |= PF_FORKNOEXEC;
if (!(clone_flags & CLONE_PTRACE))
p->ptrace = 0;
@@ -1062,7 +1062,8 @@ static task_t *copy_process(unsigned long clone_flags,
 * parent's CPU). This avoids a lot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
- if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed)))
+ if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
+ !cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
/*
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 179baafcdd96..64ab045c3d9d 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -36,7 +36,7 @@
* struct kfifo with kfree().
*/
struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- unsigned int __nocast gfp_mask, spinlock_t *lock)
+ gfp_t gfp_mask, spinlock_t *lock)
{
struct kfifo *fifo;
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(kfifo_init);
*
* The size will be rounded-up to a power of 2.
*/
-struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock)
+struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
{
unsigned char *buffer;
struct kfifo *ret;
diff --git a/kernel/params.c b/kernel/params.c
index fbf173215fd2..1a8614bac5d5 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -80,8 +80,6 @@ static char *next_arg(char *args, char **param, char **val)
int in_quote = 0, quoted = 0;
char *next;
- /* Chew any extra spaces */
- while (*args == ' ') args++;
if (*args == '"') {
args++;
in_quote = 1;
@@ -121,6 +119,10 @@ static char *next_arg(char *args, char **param, char **val)
next = args + i + 1;
} else
next = args + i;
+
+ /* Chew up trailing spaces. */
+ while (*next == ' ')
+ next++;
return next;
}
@@ -135,6 +137,10 @@ int parse_args(const char *name,
DEBUGP("Parsing ARGS: %s\n", args);
+ /* Chew leading spaces */
+ while (*args == ' ')
+ args++;
+
while (*args) {
int ret;
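
Net effect of the two params.c hunks: leading spaces are chewed once in parse_args(), and next_arg() now consumes trailing spaces, so each call returns a pointer already positioned on the next token. A comment-only walk-through with a hypothetical command line:

/*
 * Hypothetical input:
 *
 *	args = "  foo=1 bar=\"a b\"  "
 *
 * parse_args() strips the two leading spaces once. The first
 * next_arg() returns param="foo", val="1" and, thanks to the new
 * trailing-space chewing, already points at the 'b' of "bar".
 * The second call returns param="bar", val="a b" and leaves the
 * cursor on '\0', so the while (*args) loop terminates without a
 * separate skip-spaces step per iteration.
 */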
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index ad85d3f0dcc4..b3f3edc475de 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -387,25 +387,19 @@ int posix_cpu_timer_del(struct k_itimer *timer)
if (unlikely(p == NULL))
return 0;
+ spin_lock(&p->sighand->siglock);
if (!list_empty(&timer->it.cpu.entry)) {
- read_lock(&tasklist_lock);
- if (unlikely(p->signal == NULL)) {
- /*
- * We raced with the reaping of the task.
- * The deletion should have cleared us off the list.
- */
- BUG_ON(!list_empty(&timer->it.cpu.entry));
- } else {
- /*
- * Take us off the task's timer list.
- */
- spin_lock(&p->sighand->siglock);
- list_del(&timer->it.cpu.entry);
- spin_unlock(&p->sighand->siglock);
- }
- read_unlock(&tasklist_lock);
+ /*
+ * Take us off the task's timer list. We don't need to
+ * take tasklist_lock and check for the task being reaped.
+ * If it was reaped, it already called posix_cpu_timers_exit
+ * and posix_cpu_timers_exit_group to clear all the timers
+ * that pointed to it.
+ */
+ list_del(&timer->it.cpu.entry);
+ put_task_struct(p);
}
- put_task_struct(p);
+ spin_unlock(&p->sighand->siglock);
return 0;
}
@@ -424,6 +418,7 @@ static void cleanup_timers(struct list_head *head,
cputime_t ptime = cputime_add(utime, stime);
list_for_each_entry_safe(timer, next, head, entry) {
+ put_task_struct(timer->task);
timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {
@@ -436,6 +431,7 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
+ put_task_struct(timer->task);
timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {
@@ -448,6 +444,7 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
+ put_task_struct(timer->task);
timer->task = NULL;
list_del_init(&timer->entry);
if (timer->expires.sched < sched_time) {
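
The three put_task_struct() calls drop the reference each armed timer held on its target task, matching the siglock-only deletion path above. A minimal sketch of the pairing, assuming the usual get_task_struct()/put_task_struct() refcounting (function names here are illustrative, not kernel source):

#include <linux/sched.h>		/* get_task_struct/put_task_struct */
#include <linux/posix-timers.h>		/* struct cpu_timer_list */

/* Illustrative only: the ownership rule this patch enforces. */
static void timer_take_task_ref(struct cpu_timer_list *t,
				struct task_struct *p)
{
	get_task_struct(p);		/* timer now pins the task */
	t->task = p;
}

static void timer_drop_task_ref(struct cpu_timer_list *t)
{
	put_task_struct(t->task);	/* release the timer's pin */
	t->task = NULL;
}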
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 396c7873e804..46a5e5acff97 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -29,7 +29,7 @@ config PM_DEBUG
config SOFTWARE_SUSPEND
bool "Software Suspend"
- depends on PM && SWAP && (X86 || ((FVR || PPC32) && !SMP))
+ depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FVR || PPC32) && !SMP)
---help---
Enable the possibility of suspending the machine.
It doesn't need APM.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 2d8bf054d036..761956e813f5 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -17,12 +17,12 @@
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/pm.h>
#include "power.h"
extern suspend_disk_method_t pm_disk_mode;
-extern struct pm_ops * pm_ops;
extern int swsusp_suspend(void);
extern int swsusp_write(void);
@@ -49,13 +49,11 @@ dev_t swsusp_resume_device;
static void power_down(suspend_disk_method_t mode)
{
- unsigned long flags;
int error = 0;
- local_irq_save(flags);
switch(mode) {
case PM_DISK_PLATFORM:
- device_shutdown();
+ kernel_power_off_prepare();
error = pm_ops->enter(PM_SUSPEND_DISK);
break;
case PM_DISK_SHUTDOWN:
diff --git a/kernel/power/power.h b/kernel/power/power.h
index cd6a3493cc0d..6748de23e83c 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,7 +1,7 @@
#include <linux/suspend.h>
#include <linux/utsname.h>
-/* With SUSPEND_CONSOLE defined, it suspend looks *really* cool, but
+/* With SUSPEND_CONSOLE defined suspend looks *really* cool, but
we probably do not take enough locks for switching consoles, etc,
so bad things might happen.
*/
@@ -9,6 +9,9 @@
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
#endif
+#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \
+ - 4 - 3*sizeof(unsigned long) - sizeof(int) \
+ - sizeof(void *)) / sizeof(swp_entry_t))
struct swsusp_info {
struct new_utsname uts;
@@ -18,7 +21,7 @@ struct swsusp_info {
unsigned long image_pages;
unsigned long pagedir_pages;
suspend_pagedir_t * suspend_pagedir;
- swp_entry_t pagedir[768];
+ swp_entry_t pagedir[MAX_PBES];
} __attribute__((aligned(PAGE_SIZE)));
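
MAX_PBES is simply "whatever room is left in the page-aligned header after the fixed fields", divided by the size of one swap entry. A worked example, assuming 32-bit x86 sizes (PAGE_SIZE = 4096, sizeof(struct new_utsname) = 6 * 65 = 390, 4-byte longs, ints and pointers, sizeof(swp_entry_t) = 4):

/*
 * (4096 - 390 - 4 - 3*4 - 4 - 4) / 4 = 3682 / 4 = 920
 *
 * so pagedir[] grows from the old hard-coded 768 entries to 920 on
 * i386, and the bound now tracks PAGE_SIZE and pointer width instead
 * of silently being wrong on other configurations.
 */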
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index d967e875ee82..2d5c45676442 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -363,7 +363,7 @@ static void lock_swapdevices(void)
}
/**
- * write_swap_page - Write one page to a fresh swap location.
+ * write_page - Write one page to a fresh swap location.
* @addr: Address we're writing.
* @loc: Place to store the entry we used.
*
@@ -402,15 +402,14 @@ static int write_page(unsigned long addr, swp_entry_t * loc)
static void data_free(void)
{
swp_entry_t entry;
- int i;
+ struct pbe * p;
- for (i = 0; i < nr_copy_pages; i++) {
- entry = (pagedir_nosave + i)->swap_address;
+ for_each_pbe(p, pagedir_nosave) {
+ entry = p->swap_address;
if (entry.val)
swap_free(entry);
else
break;
- (pagedir_nosave + i)->swap_address = (swp_entry_t){0};
}
}
@@ -863,6 +862,9 @@ static int alloc_image_pages(void)
return 0;
}
+/* Free pages we allocated for suspend. Suspend pages are allocated
+ * before atomic copy, so we need to free them after resume.
+ */
void swsusp_free(void)
{
BUG_ON(PageNosave(virt_to_page(pagedir_save)));
@@ -918,6 +920,7 @@ static int swsusp_alloc(void)
pagedir_nosave = NULL;
nr_copy_pages = calc_nr(nr_copy_pages);
+ nr_copy_pages_check = nr_copy_pages;
pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
@@ -928,6 +931,10 @@ static int swsusp_alloc(void)
if (!enough_swap())
return -ENOSPC;
+ if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
+ !!(nr_copy_pages % PBES_PER_PAGE))
+ return -ENOSPC;
+
if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
return -ENOMEM;
@@ -940,7 +947,6 @@ static int swsusp_alloc(void)
return error;
}
- nr_copy_pages_check = nr_copy_pages;
return 0;
}
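
The new bound check uses the classic ceiling-division idiom: n / d + !!(n % d) is n divided by d, rounded up, since !! maps any nonzero remainder to 1. A comment-only example with hypothetical numbers:

/*
 * nr_copy_pages = 1000, PBES_PER_PAGE = 170 (hypothetical):
 *
 *	1000 / 170 = 5,  1000 % 170 = 150,  !!150 = 1  =>  6 pages
 *
 * If the pbe list needs more pages than pagedir[] has MAX_PBES
 * slots to record, the image cannot be described in the header,
 * so fail with -ENOSPC before doing any allocation work.
 */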
@@ -1089,7 +1095,7 @@ static inline void eat_page(void *page)
*eaten_memory = c;
}
-static unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(unsigned gfp_mask)
{
unsigned long m;
@@ -1103,7 +1109,7 @@ static unsigned long get_usable_page(unsigned gfp_mask)
return m;
}
-static void free_eaten_memory(void)
+void free_eaten_memory(void)
{
unsigned long m;
void **c;
@@ -1213,8 +1219,9 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
free_pagedir(pblist);
free_eaten_memory();
pblist = NULL;
- }
- else
+	/* Is this even worth handling? It should never ever happen, and we
+	   have just lost the user's state anyway... */
+ } else
printk("swsusp: Relocated %d pages\n", rel);
return pblist;
@@ -1434,9 +1441,9 @@ static int read_pagedir(struct pbe *pblist)
}
if (error)
- free_page((unsigned long)pblist);
-
- BUG_ON(i != swsusp_info.pagedir_pages);
+ free_pagedir(pblist);
+ else
+ BUG_ON(i != swsusp_info.pagedir_pages);
return error;
}
@@ -1474,11 +1481,12 @@ static int read_suspend_image(void)
/* Allocate memory for the image and read the data from swap */
error = check_pagedir(pagedir_nosave);
- free_eaten_memory();
+
if (!error)
error = data_read(pagedir_nosave);
if (error) { /* We fail cleanly */
+ free_eaten_memory();
for_each_pbe (p, pagedir_nosave)
if (p->address) {
free_page(p->address);
diff --git a/kernel/printk.c b/kernel/printk.c
index a967605bc2e3..4b8f0f9230a4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -488,6 +488,11 @@ static int __init printk_time_setup(char *str)
__setup("time", printk_time_setup);
+__attribute__((weak)) unsigned long long printk_clock(void)
+{
+ return sched_clock();
+}
+
/*
* This is printk. It can be called from any context. We want it to work.
*
@@ -565,7 +570,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
loglev_char = default_message_loglevel
+ '0';
}
- t = sched_clock();
+ t = printk_clock();
nanosec_rem = do_div(t, 1000000000);
tlen = sprintf(tbuf,
"<%c>[%5lu.%06lu] ",
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index bef3b6901b76..2559d4b8f23f 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -71,7 +71,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10;
+static int maxbatch = 10000;
#ifndef __HAVE_ARCH_CMPXCHG
/*
@@ -109,6 +109,10 @@ void fastcall call_rcu(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
+
+ if (unlikely(++rdp->count > 10000))
+ set_need_resched();
+
local_irq_restore(flags);
}
@@ -140,6 +144,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_bh_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
+ rdp->count++;
+/*
+ * Should we directly call rcu_do_batch() here ?
+ * if (unlikely(rdp->count > 10000))
+ * rcu_do_batch(rdp);
+ */
local_irq_restore(flags);
}
@@ -157,6 +167,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
next = rdp->donelist = list->next;
list->func(list);
list = next;
+ rdp->count--;
if (++count >= maxbatch)
break;
}
diff --git a/kernel/signal.c b/kernel/signal.c
index b92c3c9f8b9a..50c992643771 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
return sig;
}
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
int override_rlimit)
{
struct sigqueue *q = NULL;
@@ -578,7 +578,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
- tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+ if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+ tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
}
if ( signr &&
((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -936,34 +937,31 @@ force_sig_specific(int sig, struct task_struct *t)
* as soon as they're available, so putting the signal on the shared queue
* will be equivalent to sending it to one such thread.
*/
-#define wants_signal(sig, p, mask) \
- (!sigismember(&(p)->blocked, sig) \
- && !((p)->state & mask) \
- && !((p)->flags & PF_EXITING) \
- && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+ if (sigismember(&p->blocked, sig))
+ return 0;
+ if (p->flags & PF_EXITING)
+ return 0;
+ if (sig == SIGKILL)
+ return 1;
+ if (p->state & (TASK_STOPPED | TASK_TRACED))
+ return 0;
+ return task_curr(p) || !signal_pending(p);
+}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
- unsigned int mask;
struct task_struct *t;
/*
- * Don't bother traced and stopped tasks (but
- * SIGKILL will punch through that).
- */
- mask = TASK_STOPPED | TASK_TRACED;
- if (sig == SIGKILL)
- mask = 0;
-
- /*
* Now find a thread we can wake up to take the signal off the queue.
*
* If the main thread wants the signal, it gets first crack.
* Probably the least surprising to the average bear.
*/
- if (wants_signal(sig, p, mask))
+ if (wants_signal(sig, p))
t = p;
else if (thread_group_empty(p))
/*
@@ -981,7 +979,7 @@ __group_complete_signal(int sig, struct task_struct *p)
t = p->signal->curr_target = p;
BUG_ON(t->tgid != p->tgid);
- while (!wants_signal(sig, t, mask)) {
+ while (!wants_signal(sig, t)) {
t = next_thread(t);
if (t == p->signal->curr_target)
/*
@@ -1195,6 +1193,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
return error;
}
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+ uid_t uid, uid_t euid)
+{
+ int ret = -EINVAL;
+ struct task_struct *p;
+
+ if (!valid_signal(sig))
+ return ret;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ if (!p) {
+ ret = -ESRCH;
+ goto out_unlock;
+ }
+ if ((!info || ((unsigned long)info != 1 &&
+ (unsigned long)info != 2 && SI_FROMUSER(info)))
+ && (euid != p->suid) && (euid != p->uid)
+ && (uid != p->suid) && (uid != p->uid)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+ if (sig && p->sighand) {
+ unsigned long flags;
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ ret = __group_send_sig_info(sig, info, p);
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ }
+out_unlock:
+ read_unlock(&tasklist_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
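
A hedged sketch of the intended call pattern: a caller that must signal a process using credentials captured earlier (rather than those of current) passes the saved ids explicitly. All my_*/saved_* names below are illustrative, not from this patch:

#include <linux/signal.h>
#include <linux/string.h>

static int my_send_sigio(pid_t saved_pid, uid_t saved_uid, uid_t saved_euid)
{
	struct siginfo si;

	memset(&si, 0, sizeof(si));
	si.si_signo = SIGIO;
	si.si_code  = SI_KERNEL;	/* kernel-generated: SI_FROMUSER()
					 * is false, so the uid checks in
					 * kill_proc_info_as_uid() pass */
	return kill_proc_info_as_uid(SIGIO, &si, saved_pid,
				     saved_uid, saved_euid);
}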
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1766,7 +1798,8 @@ do_signal_stop(int signr)
* stop is always done with the siglock held,
* so this check has no races.
*/
- if (t->state < TASK_STOPPED) {
+ if (!t->exit_state &&
+ !(t->state & (TASK_STOPPED|TASK_TRACED))) {
stop_count++;
signal_wake_up(t, 0);
}
diff --git a/kernel/sys.c b/kernel/sys.c
index c80412be2302..2fa1ed18123c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -361,17 +361,35 @@ out_unlock:
return retval;
}
+/**
+ * emergency_restart - reboot the system
+ *
+ * Without shutting down any hardware or taking any locks
+ * reboot the system. This is called when we know we are in
+ * trouble so this is our best effort to reboot. This is
+ * safe to call in interrupt context.
+ */
void emergency_restart(void)
{
machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
-void kernel_restart(char *cmd)
+/**
+ * kernel_restart - reboot the system
+ *
+ * Shutdown everything and perform a clean reboot.
+ * This is not safe to call in interrupt context.
+ */
+void kernel_restart_prepare(char *cmd)
{
notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
+}
+void kernel_restart(char *cmd)
+{
+ kernel_restart_prepare(cmd);
if (!cmd) {
printk(KERN_EMERG "Restarting system.\n");
} else {
@@ -382,6 +400,12 @@ void kernel_restart(char *cmd)
}
EXPORT_SYMBOL_GPL(kernel_restart);
+/**
+ * kernel_kexec - reboot the system
+ *
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
+ */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
@@ -390,9 +414,7 @@ void kernel_kexec(void)
if (!image) {
return;
}
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- system_state = SYSTEM_RESTART;
- device_shutdown();
+ kernel_restart_prepare(NULL);
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
machine_kexec(image);
@@ -400,21 +422,39 @@ void kernel_kexec(void)
}
EXPORT_SYMBOL_GPL(kernel_kexec);
-void kernel_halt(void)
+/**
+ * kernel_halt - halt the system
+ *
+ * Shutdown everything and perform a clean system halt.
+ */
+void kernel_halt_prepare(void)
{
notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
system_state = SYSTEM_HALT;
device_shutdown();
+}
+void kernel_halt(void)
+{
+ kernel_halt_prepare();
printk(KERN_EMERG "System halted.\n");
machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);
-void kernel_power_off(void)
+/**
+ * kernel_power_off - power_off the system
+ *
+ * Shutdown everything and perform a clean system power_off.
+ */
+void kernel_power_off_prepare(void)
{
notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
system_state = SYSTEM_POWER_OFF;
device_shutdown();
+}
+void kernel_power_off(void)
+{
+ kernel_power_off_prepare();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
}
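
All of the *_prepare() variants run the reboot_notifier_list, so a driver's reboot notifier now fires uniformly for restart, halt, power-off and (via kernel_restart_prepare) kexec. A minimal driver-side sketch, assuming the standard register_reboot_notifier() API:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_reboot_event(struct notifier_block *nb,
			   unsigned long event, void *cmd)
{
	/* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call = my_reboot_event,
};

/* module init:	register_reboot_notifier(&my_reboot_nb);	*/
/* module exit:	unregister_reboot_notifier(&my_reboot_nb);	*/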
@@ -1728,8 +1768,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = put_user(current->pdeath_signal, (int __user *)arg2);
break;
case PR_GET_DUMPABLE:
- if (current->mm->dumpable)
- error = 1;
+ error = current->mm->dumpable;
break;
case PR_SET_DUMPABLE:
if (arg2 < 0 || arg2 > 2) {
diff --git a/kernel/time.c b/kernel/time.c
index dd5ae1162a8f..40c2410ac99a 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -570,6 +570,7 @@ void getnstimeofday(struct timespec *tv)
tv->tv_sec = x.tv_sec;
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
+EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
#if (BITS_PER_LONG < 64)
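
With the new export, GPL modules can read nanosecond-resolution wall time directly. A trivial hedged usage sketch (my_stamp is illustrative):

#include <linux/time.h>
#include <linux/kernel.h>

static void my_stamp(void)
{
	struct timespec ts;

	getnstimeofday(&ts);
	printk(KERN_DEBUG "now: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
}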