// apparmor/parser/libapparmor_re/aare_rules.cc
/*
* (C) 2006, 2007 Andreas Gruenbacher <agruen@suse.de>
* Copyright (c) 2003-2008 Novell, Inc. (All rights reserved)
* Copyright 2009-2013 Canonical Ltd. (All rights reserved)
*
* The libapparmor library is licensed under the terms of the GNU
* Lesser General Public License, version 2.1. Please see the file
* COPYING.LGPL.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* Wrapper around the dfa to convert aa rules into a dfa
*/
#include <ostream>
#include <iostream>
#include <fstream>
#include <sstream>
#include <ext/stdio_filebuf.h>
#include <assert.h>
#include <stdlib.h>
#include "aare_rules.h"
#include "expr-tree.h"
#include "parse.h"
#include "hfa.h"
#include "chfa.h"
#include "../immunix.h"
/* Tear down the rule set: release the expression tree (root is only
 * non-NULL once create_dfa() has assembled it) and clear the
 * permission/expression bookkeeping containers.
 */
aare_rules::~aare_rules(void)
{
	if (root)
		root->release();	/* recursively drops refs on the whole tree */
	unique_perms.clear();
	expr_map.clear();
}
/* Add a single-expression rule.  Convenience wrapper that forwards to
 * add_rule_vec() with a one-element rule vector.
 *
 * Returns: true on success, false if the expression fails to parse.
 */
bool aare_rules::add_rule(const char *rule, int deny, uint32_t perms,
			  uint32_t audit, dfaflags_t flags)
{
	const char *rulev[1] = { rule };

	return add_rule_vec(deny, perms, audit, 1, rulev, flags);
}
/* Merge @tree into the expression accumulated for the permission entry
 * @perms.  The first tree for a given permission set is stored directly;
 * subsequent trees are OR'd in by wrapping the slot in an AltNode.
 */
void aare_rules::add_to_rules(Node *tree, Node *perms)
{
	if (reverse)
		flip_tree(tree);

	/* operator[] default-inserts NULL for a new permission entry,
	 * same as the original lookup-then-assign sequence */
	Node *&slot = expr_map[perms];
	if (slot)
		slot = new AltNode(slot, tree);
	else
		slot = tree;
}
/* Concatenate two expression trees with an embedded \0 character between
 * them, used to delimit the component expressions of a multi-part rule.
 */
static Node *cat_with_null_seperator(Node *l, Node *r)
{
	Node *left_with_null = new CatNode(l, new CharNode(0));

	return new CatNode(left_with_null, r);
}
/* Add a rule built from @count expression strings in @rulev, joined by
 * \0 separators, carrying the given deny/perms/audit information.
 *
 * Returns: true on success, false if any expression fails to parse.
 */
bool aare_rules::add_rule_vec(int deny, uint32_t perms, uint32_t audit,
			      int count, const char **rulev, dfaflags_t flags)
{
	Node *tree = NULL, *accept;
	int exact_match;

	if (regex_parse(&tree, rulev[0]))
		return false;
	for (int i = 1; i < count; i++) {
		Node *subtree = NULL;
		if (regex_parse(&subtree, rulev[i])) {
			/* fix: release the partially built tree; it is not
			 * yet attached to expr_map/root, so returning here
			 * without releasing leaked every prior node */
			tree->release();
			return false;
		}
		tree = cat_with_null_seperator(tree, subtree);
	}

	/*
	 * Check if we have an expression with or without wildcards. This
	 * determines how exec modifiers are merged in accept_perms() based
	 * on how we split permission bitmasks here.
	 */
	exact_match = 1;
	for (depth_first_traversal i(tree); i && exact_match; i++) {
		if (dynamic_cast<StarNode *>(*i) ||
		    dynamic_cast<PlusNode *>(*i) ||
		    dynamic_cast<AnyCharNode *>(*i) ||
		    dynamic_cast<CharSetNode *>(*i) ||
		    dynamic_cast<NotCharSetNode *>(*i))
			exact_match = 0;
	}

	if (reverse)
		flip_tree(tree);

	/* permissions are interned so identical perm sets share one node */
	accept = unique_perms.insert(deny, perms, audit, exact_match);

	if (flags & DFA_DUMP_RULE_EXPR) {
		cerr << "rule: ";
		cerr << rulev[0];
		for (int i = 1; i < count; i++) {
			cerr << "\\x00";
			cerr << rulev[i];
		}
		cerr << " -> ";
		tree->dump(cerr);
		if (deny)
			cerr << " deny";
		cerr << " (0x" << hex << perms <<"/" << audit << dec << ")";
		accept->dump(cerr);
		cerr << "\n\n";
	}

	add_to_rules(tree, accept);

	rule_count++;

	return true;
}
/* create a dfa from the ruleset
* returns: buffer contain dfa tables, @size set to the size of the tables
* else NULL on failure
*/
void *aare_rules::create_dfa(size_t *size, dfaflags_t flags)
{
char *buffer = NULL;
/* finish constructing the expr tree from the different permission
* set nodes */
PermExprMap::iterator i = expr_map.begin();
if (i != expr_map.end()) {
Move rule simplification into the tree construction phase The current rule simplification algorithm has issues that need to be addressed in a rewrite, but it is still often a win, especially for larger profiles. However doing rule simplification as a single pass limits what it can do. We default to right simplification first because this has historically shown the most benefits. For two reasons 1. It allowed better grouping of the split out accept nodes that we used to do (changed in previous patches) 2. because trailing regexes like /foo/**, /foo/**.txt, can be combined and they are the largest source of node set explosion. However the move to unique node sets, eliminates 1, and forces 2 to work within only the single unique permission set on the right side factoring pass, but it still incures the penalty of walking the whole tree looking for potential nodes to factor. Moving tree simplification into the construction phases gets rid of the need for the right side factoring pass to walk other node sets that will never combine, and since we are doing simplification we can do it before the cat and permission nodes are added reducing the set of nodes to look at by another two. We do loose the ability to combine nodes from different sets during the left factoring pass, but experimentation shows that doing simplification only within the unique permission sets achieve most of the factoring that a single global pass would achieve. Signed-off-by: John Johansen <john.johansen@canonical.com> Acked-by: Steve Beattie <steve@nxnw.org>
2015-06-25 16:38:04 -06:00
if (flags & DFA_CONTROL_TREE_SIMPLE) {
Node *tmp = simplify_tree(i->second, flags);
root = new CatNode(tmp, i->first);
} else
root = new CatNode(i->second, i->first);
for (i++; i != expr_map.end(); i++) {
Move rule simplification into the tree construction phase The current rule simplification algorithm has issues that need to be addressed in a rewrite, but it is still often a win, especially for larger profiles. However doing rule simplification as a single pass limits what it can do. We default to right simplification first because this has historically shown the most benefits. For two reasons 1. It allowed better grouping of the split out accept nodes that we used to do (changed in previous patches) 2. because trailing regexes like /foo/**, /foo/**.txt, can be combined and they are the largest source of node set explosion. However the move to unique node sets, eliminates 1, and forces 2 to work within only the single unique permission set on the right side factoring pass, but it still incures the penalty of walking the whole tree looking for potential nodes to factor. Moving tree simplification into the construction phases gets rid of the need for the right side factoring pass to walk other node sets that will never combine, and since we are doing simplification we can do it before the cat and permission nodes are added reducing the set of nodes to look at by another two. We do loose the ability to combine nodes from different sets during the left factoring pass, but experimentation shows that doing simplification only within the unique permission sets achieve most of the factoring that a single global pass would achieve. Signed-off-by: John Johansen <john.johansen@canonical.com> Acked-by: Steve Beattie <steve@nxnw.org>
2015-06-25 16:38:04 -06:00
Node *tmp;
if (flags & DFA_CONTROL_TREE_SIMPLE) {
tmp = simplify_tree(i->second, flags);
} else
tmp = i->second;
root = new AltNode(root, new CatNode(tmp, i->first));
}
}
Move rule simplification into the tree construction phase The current rule simplification algorithm has issues that need to be addressed in a rewrite, but it is still often a win, especially for larger profiles. However doing rule simplification as a single pass limits what it can do. We default to right simplification first because this has historically shown the most benefits. For two reasons 1. It allowed better grouping of the split out accept nodes that we used to do (changed in previous patches) 2. because trailing regexes like /foo/**, /foo/**.txt, can be combined and they are the largest source of node set explosion. However the move to unique node sets, eliminates 1, and forces 2 to work within only the single unique permission set on the right side factoring pass, but it still incures the penalty of walking the whole tree looking for potential nodes to factor. Moving tree simplification into the construction phases gets rid of the need for the right side factoring pass to walk other node sets that will never combine, and since we are doing simplification we can do it before the cat and permission nodes are added reducing the set of nodes to look at by another two. We do loose the ability to combine nodes from different sets during the left factoring pass, but experimentation shows that doing simplification only within the unique permission sets achieve most of the factoring that a single global pass would achieve. Signed-off-by: John Johansen <john.johansen@canonical.com> Acked-by: Steve Beattie <steve@nxnw.org>
2015-06-25 16:38:04 -06:00
/* dumping of the none simplified tree without -O no-expr-simplify
* is broken because we need to build the tree above first, and
* simplification is woven into the build. Reevaluate how to fix
* this debug dump.
*/
label_nodes(root);
if (flags & DFA_DUMP_TREE) {
cerr << "\nDFA: Expression Tree\n";
root->dump(cerr);
cerr << "\n\n";
}
if (flags & DFA_CONTROL_TREE_SIMPLE) {
Move rule simplification into the tree construction phase The current rule simplification algorithm has issues that need to be addressed in a rewrite, but it is still often a win, especially for larger profiles. However doing rule simplification as a single pass limits what it can do. We default to right simplification first because this has historically shown the most benefits. For two reasons 1. It allowed better grouping of the split out accept nodes that we used to do (changed in previous patches) 2. because trailing regexes like /foo/**, /foo/**.txt, can be combined and they are the largest source of node set explosion. However the move to unique node sets, eliminates 1, and forces 2 to work within only the single unique permission set on the right side factoring pass, but it still incures the penalty of walking the whole tree looking for potential nodes to factor. Moving tree simplification into the construction phases gets rid of the need for the right side factoring pass to walk other node sets that will never combine, and since we are doing simplification we can do it before the cat and permission nodes are added reducing the set of nodes to look at by another two. We do loose the ability to combine nodes from different sets during the left factoring pass, but experimentation shows that doing simplification only within the unique permission sets achieve most of the factoring that a single global pass would achieve. Signed-off-by: John Johansen <john.johansen@canonical.com> Acked-by: Steve Beattie <steve@nxnw.org>
2015-06-25 16:38:04 -06:00
/* This is old total tree, simplification point
* For now just do simplification up front. It gets most
* of the benefit running on the smaller chains, and is
* overall faster because there are less nodes. Reevaluate
* once tree simplification is rewritten
*/
//root = simplify_tree(root, flags);
if (flags & DFA_DUMP_SIMPLE_TREE) {
cerr << "\nDFA: Simplified Expression Tree\n";
root->dump(cerr);
cerr << "\n\n";
}
}
stringstream stream;
try {
DFA dfa(root, flags);
if (flags & DFA_DUMP_UNIQ_PERMS)
dfa.dump_uniq_perms("dfa");
if (flags & DFA_CONTROL_MINIMIZE) {
dfa.minimize(flags);
if (flags & DFA_DUMP_MIN_UNIQ_PERMS)
dfa.dump_uniq_perms("minimized dfa");
}
if (flags & DFA_CONTROL_FILTER_DENY &&
flags & DFA_CONTROL_MINIMIZE &&
dfa.apply_and_clear_deny()) {
/* Do a second minimization pass as removal of deny
* information has moved some states from accepting
* to none accepting partitions
*
* TODO: add this as a tail pass to minimization
* so we don't need to do a full second pass
*/
dfa.minimize(flags);
if (flags & DFA_DUMP_MIN_UNIQ_PERMS)
dfa.dump_uniq_perms("minimized dfa");
}
if (flags & DFA_CONTROL_REMOVE_UNREACHABLE)
dfa.remove_unreachable(flags);
if (flags & DFA_DUMP_STATES)
dfa.dump(cerr);
if (flags & DFA_DUMP_GRAPH)
dfa.dump_dot_graph(cerr);
map<uchar, uchar> eq;
if (flags & DFA_CONTROL_EQUIV) {
eq = dfa.equivalence_classes(flags);
dfa.apply_equivalence_classes(eq);
if (flags & DFA_DUMP_EQUIV) {
cerr << "\nDFA equivalence class\n";
dump_equivalence_classes(cerr, eq);
}
} else if (flags & DFA_DUMP_EQUIV)
cerr << "\nDFA did not generate an equivalence class\n";
Add Differential State Compression to the DFA Differential state compression encodes a state's transitions as the difference between the state and its default state (the state it is relative too). This reduces the number of transitions that need to be stored in the transition table, hence reducing the size of the dfa. There is a trade off in that a single input character may have to traverse more than one state. This is somewhat offset by reduced table sizes providing better locality and caching properties. With carefully encoding we can still make constant match time guarentees. This patch guarentees that a state that is differentially encoded will do at most 3m state traversal to match an input of length m (as opposed to a non-differentially compressed dfa doing exactly m state traversals). In practice the actually number of extra traversals is less than this becaus we selectively choose which states are differentially encoded. In addition to reducing the size of the dfa by reducing the number of transitions that have to be stored. Differential encoding reduces the number of transitions that need to be considered by comb compression, which can result in tighter packing, due to a reduction in sparseness, and also reduces the time spent in comb compression which currently uses an O(n^2) algorithm. Differential encoding will always result in a DFA that is smaller or equal in size to the encoded DFA, and will usually improve compilation times, with the performance improvements increasing as the DFA gets larger. Eg. Given a example DFA that created 8991 states after minimization. * If only comb compression (current default) is used 52057 transitions are packed into a table of 69591 entries. Achieving an efficiency of about 75% (an average of about 7.74 table entries per state). 
With a resulting compressed dfa16 size of 404238 bytes and a run time for the dfa compilation of real 0m9.037s user 0m8.893s sys 0m0.036s * If differential encoding + comb compression is used, 8292 of the 8991 states are differentially encoded, with 31557 trans removed. Resulting in 20500 transitions are packed into a table of 20675 entries. Acheiving an efficiency of about 99.2% (an average of about 2.3 table entries per state With a resulting compressed dfa16 size of 207874 bytes (about 48.6% reduction) and a run time for the dfa compilation of real 0m5.416s (about 40% faster) user 0m5.280s sys 0m0.040s Repeating with a larger DFA that has 17033 states after minimization. * If only comb compression (current default) is used 102992 transitions are packed into a table of 137987 entries. Achieving an efficiency of about 75% (an average of about 8.10 entries per state). With a resultant compressed dfa16 size of 790410 bytes and a run time for d compilation of real 0m28.153s user 0m27.634s sys 0m0.120s * with differential encoding 39374 transition are packed into a table of 39594 entries. Achieving an efficiency of about 99.4% (an average of about 2.32 entries per state). With a resultant compressed dfa16 size of 396838 bytes (about 50% reduction and a run time for dfa compilation of real 0m11.804s (about 58% faster) user 0m11.657s sys 0m0.084s Signed-off-by: John Johansen <john.johansen@canonical.com> Acked-by: Seth Arnold <seth.arnold@canonical.com>
2014-01-09 16:55:55 -08:00
if (flags & DFA_CONTROL_DIFF_ENCODE) {
dfa.diff_encode(flags);
if (flags & DFA_DUMP_DIFF_ENCODE)
dfa.dump_diff_encode(cerr);
}
CHFA chfa(dfa, eq, flags);
if (flags & DFA_DUMP_TRANS_TABLE)
chfa.dump(cerr);
chfa.flex_table(stream, "");
}
catch(int error) {
*size = 0;
return NULL;
}
stringbuf *buf = stream.rdbuf();
buf->pubseekpos(0);
*size = buf->in_avail();
buffer = (char *)malloc(*size);
if (!buffer)
return NULL;
buf->sgetn(buffer, *size);
return buffer;
}