apparmor/kernel-patches/for-mainline/rework-locking.diff
2007-02-21 01:08:46 +00:00

Index: b/security/apparmor/apparmor.h
===================================================================
--- a/security/apparmor/apparmor.h
+++ b/security/apparmor/apparmor.h
@@ -128,9 +128,6 @@ struct aa_profile {
extern struct list_head profile_list;
extern rwlock_t profile_list_lock;
-extern struct list_head task_context_list;
-extern rwlock_t task_context_list_lock;
-
/**
* struct aa_task_context - primary label for confined tasks
* @profile: the current profile
@@ -155,9 +152,6 @@ static inline struct aa_task_context *aa
return (struct aa_task_context *)task->security;
}
-/* Lock protecting access to 'struct aa_task_context' accesses */
-extern spinlock_t cxt_lock;
-
extern struct aa_profile *null_complain_profile;
/* aa_audit - AppArmor auditing structure
@@ -242,9 +236,6 @@ extern struct aa_profile *__aa_find_prof
/* list.c */
extern void aa_profilelist_release(void);
-extern void aa_task_context_list_add(struct aa_task_context *);
-extern void aa_task_context_list_remove(struct aa_task_context *);
-extern void aa_task_context_list_release(void);
/* module_interface.c */
extern ssize_t aa_file_prof_add(void *, size_t);
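For orientation, the hunks above drop the global task_context_list (and its lock) in favour of a per-profile list of task contexts guarded by the per-profile lock. A rough sketch of the data layout this implies; any field beyond the ones visible in the hunks (name, list, task_contexts, isstale, profile, task, hat_magic) is illustrative only:

/*
 * Illustrative sketch, not the literal declarations from this tree.
 * Relies on the file's existing includes (linux/list.h, linux/spinlock.h,
 * linux/sched.h).
 */
struct aa_profile {
	char *name;			/* looked up by __aa_find_profile() */
	struct list_head list;		/* entry on the global profile_list */
	struct list_head task_contexts;	/* contexts confined by this profile */
	spinlock_t lock;		/* taken via lock_profile()/unlock_profile() */
	int isstale;			/* set when the profile is replaced/removed */
	/* refcount and mediation state elided */
};

struct aa_task_context {
	struct aa_profile *profile;	/* current profile, published with RCU */
	struct list_head list;		/* entry on profile->task_contexts */
	struct task_struct *task;	/* back pointer to the confined task */
	u32 hat_magic;
};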
Index: b/security/apparmor/inline.h
===================================================================
--- a/security/apparmor/inline.h
+++ b/security/apparmor/inline.h
@@ -42,7 +42,7 @@ static inline struct aa_profile *aa_get_
rcu_read_lock();
cxt = aa_task_context(task);
if (cxt) {
- profile = (struct aa_profile *)rcu_dereference(cxt->profile);
+ profile = rcu_dereference(cxt->profile);
aa_dup_profile(profile);
}
rcu_read_unlock();
@@ -79,41 +79,22 @@ static inline void aa_change_profile(str
aa_put_profile(old_profile);
}
-/**
- * alloc_aa_task_context - allocate a new aa_task_context
- * @task: task struct
- *
- * Allocate a new aa_task_context including a backpointer to it's referring
- * task.
- */
-static inline struct aa_task_context *alloc_aa_task_context(struct task_struct *task)
+static inline struct aa_task_context *
+aa_alloc_task_context(struct task_struct *task)
{
struct aa_task_context *cxt;
cxt = kzalloc(sizeof(struct aa_task_context), GFP_KERNEL);
- if (!cxt)
- goto out;
-
- /* back pointer to task */
- cxt->task = task;
-
- /* any readers of the list must make sure that they can handle
- * case where cxt->profile is not yet set (null)
- */
- aa_task_context_list_add(cxt);
+ if (cxt) {
+ INIT_LIST_HEAD(&cxt->list);
+ cxt->task = task;
+ }
-out:
return cxt;
}
-/**
- * free_aa_task_context - Free a aa_task_context previously allocated by
- * alloc_aa_task_context
- * @cxt: aa_task_context
- */
-static inline void free_aa_task_context(struct aa_task_context *cxt)
+static inline void aa_free_task_context(struct aa_task_context *cxt)
{
- aa_task_context_list_remove(cxt);
kfree(cxt);
}
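The aa_get_profile() hunk above follows the standard RCU read-side idiom: dereference the RCU-published profile pointer inside rcu_read_lock()/rcu_read_unlock() and take a counted reference before leaving the critical section, so the profile stays valid after the read side ends. A minimal sketch of that idiom as the function looks after this patch (aa_dup_profile() is assumed to take the reference):

static inline struct aa_profile *aa_get_profile(struct task_struct *task)
{
	struct aa_task_context *cxt;
	struct aa_profile *profile = NULL;

	rcu_read_lock();
	cxt = aa_task_context(task);
	if (cxt) {
		/* writers publish cxt->profile with rcu_assign_pointer() */
		profile = rcu_dereference(cxt->profile);
		aa_dup_profile(profile);  /* hold a ref past rcu_read_unlock() */
	}
	rcu_read_unlock();

	return profile;
}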
Index: b/security/apparmor/list.c
===================================================================
--- a/security/apparmor/list.c
+++ b/security/apparmor/list.c
@@ -17,10 +17,6 @@
LIST_HEAD(profile_list);
rwlock_t profile_list_lock = RW_LOCK_UNLOCKED;
-/* list of all task_contexts and lock */
-LIST_HEAD(task_context_list);
-rwlock_t task_context_list_lock = RW_LOCK_UNLOCKED;
-
/**
* __aa_find_profile - look up a profile on the profile list
* @name: name of profile to find
@@ -56,58 +52,6 @@ void aa_profilelist_release(void)
write_unlock(&profile_list_lock);
}
-/**
- * aa_task_context_list_add - Add aa_task_context to task_context_list
- * @cxt: new aa_task_context
- */
-void aa_task_context_list_add(struct aa_task_context *cxt)
-{
- unsigned long flags;
-
- if (!cxt) {
- AA_INFO("%s: bad aa_task_context\n", __FUNCTION__);
- return;
- }
-
- write_lock_irqsave(&task_context_list_lock, flags);
- /* new aa_task_contexts must be added to the end of the list due to a
- * subtle interaction between fork and profile replacement.
- */
- list_add_tail(&cxt->list, &task_context_list);
- write_unlock_irqrestore(&task_context_list_lock, flags);
-}
-
-/**
- * aa_task_context_list_remove - Remove aa_task_context from task_context_list
- * @cxt: aa_task_context to be removed
- */
-void aa_task_context_list_remove(struct aa_task_context *cxt)
-{
- unsigned long flags;
-
- if (cxt) {
- write_lock_irqsave(&task_context_list_lock, flags);
- list_del_init(&cxt->list);
- write_unlock_irqrestore(&task_context_list_lock, flags);
- }
-}
-
-/**
- * aa_task_context_list_release - Remove all aa_task_contexts from
- * task_context_list
- */
-void aa_task_context_list_release(void)
-{
- struct aa_task_context *node, *tmp;
- unsigned long flags;
-
- write_lock_irqsave(&task_context_list_lock, flags);
- list_for_each_entry_safe(node, tmp, &task_context_list, list) {
- list_del_init(&node->list);
- }
- write_unlock_irqrestore(&task_context_list_lock, flags);
-}
-
/* seq_file helper routines
* Used by apparmorfs.c to iterate over profile_list
*/
Index: b/security/apparmor/lsm.c
===================================================================
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -21,9 +21,6 @@
#include "apparmor.h"
#include "inline.h"
-/* struct aa_task_context write update lock (read side is RCU). */
-spinlock_t cxt_lock = SPIN_LOCK_UNLOCKED;
-
/* Flag values, also controllable via apparmorfs/control.
* We explicitly do not allow these to be modifiable when exported via
* /sys/modules/parameters, as we want to do additional mediation and
@@ -749,49 +746,68 @@ createfs_out:
static void __exit apparmor_exit(void)
{
- struct aa_task_context *cxt;
- unsigned long flags;
+ LIST_HEAD(task_contexts_bucket);
- /* Remove profiles from the global profile list.
- * This is just for tidyness as there is no way to reference this
- * list once the AppArmor lsm hooks are detached (below)
- */
- aa_profilelist_release();
+ /* Remove and release all the profiles on the profile list. */
+ write_lock(&profile_list_lock);
+ while (!list_empty(&profile_list)) {
+ struct aa_profile *profile =
+ list_entry(profile_list.next, struct aa_profile, list);
+
+ /* Remove the profile from each task context it is on. */
+ lock_profile(profile);
+ while (!list_empty(&profile->task_contexts)) {
+ struct aa_task_context *cxt =
+ list_entry(profile->task_contexts.next,
+ struct aa_task_context, list);
+
+ /*
+ * Detach the task context from the task. Protect
+ * from races by taking the task lock here.
+ */
+ task_lock(cxt->task);
+ rcu_assign_pointer(cxt->task->security, NULL);
+ task_unlock(cxt->task);
+ aa_change_profile(cxt, NULL, 0);
- /* Remove profiles from profiled tasks
- * If this is not done, if module is reloaded after being removed,
- * old profiles (still refcounted in memory) will become 'magically'
- * reattached
- */
+ /*
+ * Defer release: the task context may still have
+ * active readers.
+ */
+ list_move(&cxt->list, &task_contexts_bucket);
+ }
+ unlock_profile(profile);
- /*
- * FIXME: We have a lock inversion here (cp. aa_file_prof_repl,
- * aa_file_prof_remove).
- */
- spin_lock_irqsave(&cxt_lock, flags);
- read_lock(&task_context_list_lock);
- list_for_each_entry(cxt, &task_context_list, list) {
- if (cxt->profile)
- aa_change_profile(cxt, NULL, 0);
+ /* Release the profile itself. */
+ list_del_init(&profile->list);
+ aa_put_profile(profile);
}
- read_unlock(&task_context_list_lock);
- spin_unlock_irqrestore(&cxt_lock, flags);
-
- /* Free up list of profile aa_task_context */
- aa_task_context_list_release();
+ write_unlock(&profile_list_lock);
free_null_complain_profile();
+ /**
+ * Delay for an rcu cycle to make sure that all active task
+ * context readers have finished, and all profiles have been
+ * freed by their rcu callbacks.
+ */
+ synchronize_rcu();
+
+ /* Now we can safely free all remaining task contexts. */
+ while (!list_empty(&task_contexts_bucket)) {
+ struct aa_task_context *cxt =
+ list_entry(task_contexts_bucket.next,
+ struct aa_task_context, list);
+
+ list_del(&cxt->list);
+ kfree(cxt);
+ }
+
destroy_apparmorfs();
if (unregister_security(&apparmor_ops))
AA_WARN("Unable to properly unregister AppArmor\n");
- /* delay for an rcu cycle to make ensure that profiles pending
- * destruction in the rcu callback are freed.
- */
- synchronize_rcu();
-
AA_INFO("AppArmor protection removed\n");
aa_audit_message(NULL, GFP_KERNEL, 0,
"AppArmor protection removed\n");
Index: b/security/apparmor/main.c
===================================================================
--- a/security/apparmor/main.c
+++ b/security/apparmor/main.c
@@ -717,19 +717,9 @@ int aa_link(struct aa_profile *profile,
*******************************/
/**
- * aa_fork - create a new aa_task_context
- * @task: new process
- *
- * Create a new aa_task_context for newly created process @task if it's parent
- * is already confined. Otherwise a aa_task_context will be lazily allocated
- * will get one with NULL values. Return 0 on sucess.
- * for the child if it subsequently execs (in aa_register).
- * Return 0 on sucess.
- *
- * The cxt_lock is used to maintain consistency against profile
- * replacement/removal.
+ * aa_fork - initialize the task context for a new task
+ * @task: task that is being created
*/
-
int aa_fork(struct task_struct *task)
{
struct aa_task_context *cxt = aa_task_context(current);
@@ -738,9 +728,7 @@ int aa_fork(struct task_struct *task)
AA_DEBUG("%s\n", __FUNCTION__);
if (cxt && cxt->profile) {
- unsigned long flags;
-
- newcxt = alloc_aa_task_context(task);
+ newcxt = aa_alloc_task_context(task);
/* FIXME: The alloc above is a blocking operation, so
* cxt->profile may have vanished by now.
@@ -758,9 +746,7 @@ int aa_fork(struct task_struct *task)
* race with profile replacement or removal here, and
* he new task would end up with an obsolete profile.
*/
- spin_lock_irqsave(&cxt_lock, flags);
aa_change_profile(newcxt, cxt->profile, cxt->hat_magic);
- spin_unlock_irqrestore(&cxt_lock, flags);
if (APPARMOR_COMPLAIN(cxt) &&
cxt->profile == null_complain_profile)
@@ -911,7 +897,6 @@ repeat:
/* Apply the new profile, or switch to unconfined if NULL. */
if (!IS_ERR(newprofile)) {
struct aa_task_context *cxt, *lazy_cxt = NULL;
- unsigned long flags;
/* grab a lock - this is to guarentee consistency against
* other writers of aa_task_context (replacement/removal)
@@ -937,7 +922,7 @@ repeat:
*/
if (!profile && !(cxt = aa_task_context(current))) {
- lazy_cxt = alloc_aa_task_context(current);
+ lazy_cxt = aa_alloc_task_context(current);
if (!lazy_cxt) {
AA_ERROR("%s: Failed to allocate aa_task_context\n",
__FUNCTION__);
@@ -946,13 +931,11 @@ repeat:
}
}
- spin_lock_irqsave(&cxt_lock, flags);
-
cxt = aa_task_context(current);
if (lazy_cxt) {
if (cxt) {
/* raced by setprofile - created cxt */
- free_aa_task_context(lazy_cxt);
+ aa_free_task_context(lazy_cxt);
lazy_cxt = NULL;
} else {
/* Not rcu used to get the write barrier
@@ -978,7 +961,6 @@ repeat:
/* Race, profile was removed, not replaced.
* Redo with error checking
*/
- spin_unlock_irqrestore(&cxt_lock, flags);
goto repeat;
}
}
@@ -1006,8 +988,6 @@ repeat:
LOG_HINT(newprofile, GFP_ATOMIC, HINT_CHGPROF,
"pid=%d\n",
current->pid);
-
- spin_unlock_irqrestore(&cxt_lock, flags);
}
cleanup:
@@ -1020,28 +1000,51 @@ cleanup:
}
/**
- * aa_release - release the task's aa_task_context
+ * aa_release - release a task context
* @task: task being released
*
* This is called after a task has exited and the parent has reaped it.
- * @task->security blob is freed.
- *
- * This is the one case where we don't need to hold the cxt_lock before
- * removing a profile from a aa_task_context. Once the aa_task_context has
- * been removed from the aa_task_context_list, we are no longer racing other
- * writers. There may still be other readers so we must still use
- * aa_change_profile to put the aa_task_context's reference safely.
*/
void aa_release(struct task_struct *task)
{
- struct aa_task_context *cxt = aa_task_context(task);
- if (cxt) {
- task->security = NULL;
+ struct aa_task_context *cxt = NULL;
+ struct aa_profile *profile;
- aa_task_context_list_remove(cxt);
- aa_change_profile(cxt, NULL, 0);
+ /*
+ * While the task context is still on a profile's task context
+ * list, another process could replace the profile under us,
+ * leaving us with a locked profile that is no longer attached
+ * to this task. So after locking the profile, we lock the task
+ * to make sure that the profile is still attached. (We must
+ * lock the profile first to avoid lock inversion.)
+ *
+ * If the task does not have a profile attached at this point,
+ * we are safe.
+ */
+repeat:
+ profile = aa_get_profile(task);
+ if (profile) {
+ lock_profile(profile);
+ task_lock(task);
+ cxt = aa_task_context(task);
+ if (unlikely(profile != cxt->profile)) {
+ task_unlock(task);
+ unlock_profile(profile);
+ aa_put_profile(profile);
+ goto repeat;
+ }
+ /*
+ * There may still be other profile readers, so we must
+ * put the profile reference safely.
+ */
+ aa_change_profile(cxt, NULL, 0);
+ task_unlock(task);
+ list_del(&cxt->list);
+ unlock_profile(profile);
+ aa_put_profile(profile);
kfree(cxt);
+ task->security = NULL;
}
}
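The comment block in the new aa_release() spells out the lock order this rework settles on: profile lock first, task lock second, with a retry whenever the profile was swapped out between taking the reference and taking the locks. The retry skeleton, isolated from the rest of the function (aa_get_profile() returns a counted reference as sketched earlier):

	struct aa_profile *profile;
	struct aa_task_context *cxt;

repeat:
	profile = aa_get_profile(task);
	if (!profile)
		return;				/* unconfined: nothing to detach */

	lock_profile(profile);			/* outer lock */
	task_lock(task);			/* inner lock */

	cxt = aa_task_context(task);
	if (unlikely(profile != cxt->profile)) {
		/* Raced with replacement/removal: drop everything and retry. */
		task_unlock(task);
		unlock_profile(profile);
		aa_put_profile(profile);
		goto repeat;
	}

	/* Both locks held and the profile is still the task's: detach here. */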
Index: b/security/apparmor/procattr.c
===================================================================
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -82,7 +82,6 @@ int aa_setprocattr_changehat(char *hatin
char *token = NULL, *hat, *smagic, *tmp;
u32 magic;
int rc, len, consumed;
- unsigned long flags;
AA_DEBUG("%s: %p %zd\n", __FUNCTION__, hatinfo, infosize);
@@ -158,9 +157,7 @@ int aa_setprocattr_changehat(char *hatin
AA_DEBUG("%s: Magic 0x%x Hat '%s'\n",
__FUNCTION__, magic, hat ? hat : NULL);
- spin_lock_irqsave(&cxt_lock, flags);
error = aa_change_hat(hat, magic);
- spin_unlock_irqrestore(&cxt_lock, flags);
out:
if (token) {
@@ -178,7 +175,6 @@ int aa_setprocattr_setprofile(struct tas
struct aa_profile *profile = NULL;
struct aa_task_context *cxt;
char *name = NULL;
- unsigned long flags;
AA_DEBUG("%s: current %s(%d)\n",
__FUNCTION__, current->comm, current->pid);
@@ -221,8 +217,6 @@ int aa_setprocattr_setprofile(struct tas
}
}
- spin_lock_irqsave(&cxt_lock, flags);
-
cxt = aa_task_context(task);
/* switch to unconstrained */
@@ -249,10 +243,7 @@ int aa_setprocattr_setprofile(struct tas
AA_WARN("%s: task %s(%d) has no aa_task_context\n",
__FUNCTION__, task->comm, task->pid);
- /* unlock so we can safely GFP_KERNEL */
- spin_unlock_irqrestore(&cxt_lock, flags);
-
- cxt = alloc_aa_task_context(task);
+ cxt = aa_alloc_task_context(task);
if (!cxt) {
AA_WARN("%s: Unable to allocate "
"aa_task_context for task %s(%d). "
@@ -267,11 +258,10 @@ int aa_setprocattr_setprofile(struct tas
goto out;
}
- spin_lock_irqsave(&cxt_lock, flags);
if (!aa_task_context(task)) {
task->security = cxt;
} else { /* race */
- free_aa_task_context(cxt);
+ aa_free_task_context(cxt);
cxt = aa_task_context(task);
}
}
@@ -287,7 +277,6 @@ int aa_setprocattr_setprofile(struct tas
if (!profile) {
/* Race, profile was removed. */
- spin_unlock_irqrestore(&cxt_lock, flags);
goto repeat;
}
}
@@ -312,8 +301,6 @@ int aa_setprocattr_setprofile(struct tas
aa_put_profile(profile);
}
- spin_unlock_irqrestore(&cxt_lock, flags);
-
error = 0;
out:
kfree(name);
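The setprofile path above keeps a common pattern for blocking allocations: call aa_alloc_task_context() (GFP_KERNEL, may sleep) before entering the non-sleeping critical section, re-check once inside, and free the allocation if another writer installed a context first. A stripped-down sketch of that pattern; the hunk does not show which lock closes the race, so the task_lock() here is an assumption:

	struct aa_task_context *cxt, *new_cxt;

	/* May sleep, so allocate before taking any spinlock. */
	new_cxt = aa_alloc_task_context(task);
	if (!new_cxt)
		return -ENOMEM;

	task_lock(task);			/* assumed writer-side lock */
	cxt = aa_task_context(task);
	if (cxt) {
		/* Lost the race: another writer already installed a context. */
		aa_free_task_context(new_cxt);
	} else {
		task->security = new_cxt;
		cxt = new_cxt;
	}
	task_unlock(task);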
Index: b/security/apparmor/module_interface.c
===================================================================
--- a/security/apparmor/module_interface.c
+++ b/security/apparmor/module_interface.c
@@ -460,29 +460,24 @@ ssize_t aa_file_prof_repl(void *udata, s
old_profile = __aa_find_profile(new_profile->name, &profile_list);
if (old_profile) {
struct aa_task_context *cxt;
- unsigned long flags;
-
- list_del_init(&old_profile->list);
lock_profile(old_profile);
old_profile->isstale = 1;
- unlock_profile(old_profile);
-
- /*
- * Find all tasks using the old profile and replace the old
- * profile with the new.
- */
- read_lock_irqsave(&task_context_list_lock, flags);
- list_for_each_entry(cxt, &task_context_list, list) {
- spin_lock_irqsave(&cxt_lock, flags);
-
- if (cxt->profile &&
- cxt->profile->parent == old_profile)
- task_replace(cxt, new_profile);
- spin_unlock_irqrestore(&cxt_lock, flags);
+ list_for_each_entry(cxt, &old_profile->task_contexts, list) {
+ /*
+ * Protect from racing with aa_release by taking
+ * the task lock here.
+ */
+ task_lock(cxt->task);
+ task_replace(cxt, new_profile);
+ task_unlock(cxt->task);
}
- read_unlock_irqrestore(&task_context_list_lock, flags);
+ list_splice_init(&old_profile->task_contexts,
+ &new_profile->task_contexts);
+ unlock_profile(old_profile);
+
+ list_del_init(&old_profile->list);
aa_put_profile(old_profile);
}
list_add(&new_profile->list, &profile_list);
@@ -502,8 +497,6 @@ ssize_t aa_file_prof_repl(void *udata, s
ssize_t aa_file_prof_remove(const char *name, size_t size)
{
struct aa_profile *profile;
- struct aa_task_context *cxt;
- unsigned long flags;
write_lock(&profile_list_lock);
profile = __aa_find_profile(name, &profile_list);
@@ -512,24 +505,22 @@ ssize_t aa_file_prof_remove(const char *
return -ENOENT;
}
- list_del_init(&profile->list);
-
lock_profile(profile);
profile->isstale = 1;
- unlock_profile(profile);
-
- read_lock_irqsave(&task_context_list_lock, flags);
- list_for_each_entry(cxt, &task_context_list, list) {
- if (cxt->profile && cxt->profile->parent == profile) {
- spin_lock(&cxt_lock);
- aa_change_profile(cxt, NULL, 0);
- spin_unlock(&cxt_lock);
- }
+ while (!list_empty(&profile->task_contexts)) {
+ struct aa_task_context *cxt =
+ list_entry(profile->task_contexts.next,
+ struct aa_task_context, list);
+ task_lock(cxt->task);
+ aa_change_profile(cxt, NULL, 0);
+ task_unlock(cxt->task);
+ list_del_init(&cxt->list);
}
- read_unlock_irqrestore(&task_context_list_lock, flags);
+ unlock_profile(profile);
- aa_put_profile(profile);
+ list_del_init(&profile->list);
write_unlock(&profile_list_lock);
+ aa_put_profile(profile);
return size;
}
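Finally, aa_file_prof_repl() above shows the replacement sequence under the new scheme: mark the old profile stale under its lock, switch every confined task with task_lock() held, then splice the whole task_contexts list onto the new profile before publishing it on profile_list. A compact sketch of that loop-and-splice step (task_replace() is assumed to swap cxt->profile over to new_profile):

	struct aa_task_context *cxt;

	lock_profile(old_profile);
	old_profile->isstale = 1;

	list_for_each_entry(cxt, &old_profile->task_contexts, list) {
		/* task_lock() keeps aa_release() from detaching cxt under us. */
		task_lock(cxt->task);
		task_replace(cxt, new_profile);
		task_unlock(cxt->task);
	}

	/*
	 * new_profile is not yet on profile_list, so its task_contexts list
	 * has no other users; one splice moves every context across.
	 */
	list_splice_init(&old_profile->task_contexts,
			 &new_profile->task_contexts);
	unlock_profile(old_profile);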