Add rule mitigation support: mitigated rules receive result overrides during evaluation and are excluded from remediation.
Fixes bug #7965
---
 src/bin/secstate     | 19 +++++++++++++++++++
 src/secstate/main.py | 39 ++++++++++++++++++++++++++++++++++++++-
 src/secstate/util.py | 37 +++++++++++++++++++++++++++++--------
 3 files changed, 86 insertions(+), 9 deletions(-)
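For reference, a hypothetical invocation of the new subcommand (the benchmark
and rule IDs below are made up for illustration; the flags come from the
option parser added in src/bin/secstate):

    $ secstate mitigate my-benchmark rule-sshd-disable-root \
          -r "Root login is blocked by the edge firewall" \
          -a "Site security officer"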
diff --git a/src/bin/secstate b/src/bin/secstate
index c219e3f..ebaf9c8 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -107,6 +107,9 @@ def main():
     elif subcommand == 'remediate':
         return remediate(sys.argv[arg_num:])
+
+    elif subcommand == 'mitigate':
+        return mitigate(sys.argv[arg_num:])
 
     elif subcommand == 'save':
         return save_profile(sys.argv[arg_num:])
@@ -298,6 +301,22 @@ def save_profile(arguments):
     if not sec_instance.save_profile(args[0], args[1]):
         return -1
+def mitigate(arguments):
+    parser = OptionParser(usage="secstate mitigate [options] <BenchmarkID> <RuleID>")
+    parser.add_option('-r', '--remark', action='store', type='string', dest='remark', default=None,
+                      help="Remark explaining why the rule is being mitigated")
+    parser.add_option('-a', '--authority', action='store', type='string', dest='authority', default=None,
+                      help="Authority responsible for approving the mitigation")
+    (options, args) = parser.parse_args(arguments)
+    if len(args) != 2:
+        sys.stderr.write("Wrong number of arguments passed to mitigate\n'secstate mitigate [options] <BenchmarkID> <RuleID>'\n")
+        return -1
+    if options.remark == None:
+        options.remark = raw_input("Please enter a remark for this mitigation. Press Enter when finished\n")
+
+    if not sec_instance.mitigate(args[0], args[1], options.remark, options.authority):
+        return -1
+
 if __name__ == '__main__':
     sys.exit(main())
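Note: when -r is omitted, the command prompts for the remark interactively
via raw_input (the argument count is now validated first, so no prompt is
shown on a usage error). A session would look roughly like this, with
illustrative IDs and remark text:

    $ secstate mitigate my-benchmark rule-sshd-disable-root
    Please enter a remark for this mitigation. Press Enter when finished
    Root login is blocked by the edge firewall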
diff --git a/src/secstate/main.py b/src/secstate/main.py
index 01405dd..3f4702a 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -285,6 +285,11 @@ class Secstate:
                 benchmark.__dict__['puppet'] = puppet_files
                 benchmark.config.set(benchmark.id, 'puppet', json.dumps(list(puppet_files)))
+            benchmark.__dict__['mitigations'] = {}
+            if benchmark.config.has_section('mitigations'):
+                for opt,val in benchmark.config.items('mitigations'):
+                    benchmark.mitigations[opt] = json.loads(val)
+
             if store_path != None:
                 id = get_benchmark_id(benchmark_file)
                 directory = os.path.join(bench_dir, id)
@@ -1036,12 +1041,13 @@ class Secstate:
             os.close(site_pp_buf)
self.log.info("-- Remediating %(id)s --" % {'id':bench_id}) + ignore_ids = [] try: - ignore_ids = [] for key in benchmark.selections: if benchmark.selections[key] == False: ignore_ids.append(key) ignore_ids += passing_ids + ignore_ids += benchmark.mitigations.keys() puppet_content = parse_puppet_fixes(benchmark, ignore_ids) except SecstateException, se: sys.stderr.write('Error: %s\n' % str(se)) @@ -1067,4 +1073,35 @@ class Secstate: os.unlink(fname)
         return True
+
+    def mitigate(self, content_id, item_id, remark, authority=None):
+        if not self.content.has_key(content_id):
+            self.log.error("No content '%(id)s' has been imported" % {'id':content_id})
+            return False
+
+        benchmark = self.import_content(content_id)
+        if not benchmark.__dict__.has_key('oval'):
+            self.log.error("Cannot mitigate OVAL content")
+            return False
+
+        item = benchmark.get_item(item_id)
+        if item == None:
+            self.log.error("%(bench)s does not contain item %(item)s" % {'bench':content_id, 'item':item_id})
+            return False
+
+        if not benchmark.config.has_section('mitigations'):
+            benchmark.config.add_section('mitigations')
+        mitg_dict = {'remark':remark, 'authority':authority}
+        benchmark.config.set('mitigations', item_id, json.dumps(mitg_dict))
+
+        try:
+            fp = open(self.content_configs[content_id], 'w')
+            benchmark.config.write(fp)
+            fp.close()
+        except IOError, e:
+            self.log.error("Error saving changes: %(err)s" % {'err':e})
+            return False
+
+        return True
+
diff --git a/src/secstate/util.py b/src/secstate/util.py
index fbc5d33..901c9a4 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -57,6 +57,8 @@ def xccdf_reporter(msg, usr):
     result = msg.user2num
     if result == oscap.xccdf.XCCDF_RESULT_PASS:
         usr['pass'] += 1
+    elif msg.user1str in usr['mitigations']:
+        usr['mitg_total'] += 1
     elif result == oscap.xccdf.XCCDF_RESULT_FAIL:
         usr['fail'] += 1
     elif result == oscap.xccdf.XCCDF_RESULT_UNKNOWN:
@@ -101,7 +103,9 @@ def evaluate_xccdf(benchmark, url_XCCDF, s_profile=None, all=False, verbose=Fals
                 'nselect':0,
                 'info':0,
                 'fixed':0,
-                'verbose':verbose}
+                'mitg_total':0,
+                'verbose':verbose,
+                'mitigations':benchmark.mitigations.keys()}
 
     policy_model.register_output_callback(xccdf_reporter, res_dict)
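Note: the mitigate() method above persists each mitigation into the
per-content config file through ConfigParser, with the remark/authority
pair JSON-encoded by json.dumps. The stored section should look roughly
like this (rule ID and values illustrative):

    [mitigations]
    rule-sshd-disable-root = {"remark": "Root login is blocked by the edge firewall", "authority": "Site security officer"}

This is the same section that import_content() reads back into
benchmark.mitigations, and that apply_changes_profile() below now skips so
it is not mistaken for a profile section.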
@@ -129,10 +133,26 @@ def evaluate_xccdf(benchmark, url_XCCDF, s_profile=None, all=False, verbose=Fals
             score = policy.get_score(ritem, model.system)
             ritem.add_score(score)
+            for rule_res in ritem.rule_results:
+                id = rule_res.idref
+                if benchmark.mitigations.has_key(id):
+                    override = oscap.xccdf.override_new()
+                    override.new_result = oscap.xccdf.XCCDF_RESULT_INFORMATIONAL
+                    override.old_result = rule_res.result
+                    remark = oscap.common.text_new()
+                    remark.text = str(benchmark.mitigations[id]['remark'])
+                    override.remark = remark
+                    authority = benchmark.mitigations[id]['authority']
+                    if authority != None:
+                        override.authority = authority
+                    rule_res.add_override(override)
+                    rule_res.result = override.new_result
+
             ritem.end_time = time.time()
print "--Results for '%(id)s' (Profile: '%(prof)s')--" % {'id':benchmark.id, 'prof':s_profile} print "Passed:\t\t%(pass)s\n" \ + "Mitigated:\t%(mitg_total)s\n" \ "Failed:\t\t%(fail)s\n" \ "Fixed:\t\t%(fixed)s\n" \ "Not Selected:\t%(nselect)s\n" \ @@ -400,13 +420,14 @@ def apply_changes_profile(benchmark): prof.add_title(new_title) prof.id = section
-        for id,val in benchmark.config.items(section):
-            if id != 'extends':
-                sel_dict = json.loads(val)
-                select = oscap.xccdf.select_new()
-                select.item = id
-                select.selected = sel_dict['selected']
-                prof.add_select(select)
+        if section != 'mitigations':
+            for id,val in benchmark.config.items(section):
+                if id != 'extends':
+                    sel_dict = json.loads(val)
+                    select = oscap.xccdf.select_new()
+                    select.item = id
+                    select.selected = sel_dict['selected']
+                    prof.add_select(select)
 
         benchmark.add_profile(prof)
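Note: the override loop in evaluate_xccdf() should serialize into the XCCDF
results roughly as sketched below. This is a sketch based on the XCCDF
override element (old-result/new-result/remark children, authority
attribute); the exact output depends on the OpenSCAP version, and the IDs
and text are illustrative:

    <rule-result idref="rule-sshd-disable-root" ...>
      <override time="..." authority="Site security officer">
        <old-result>fail</old-result>
        <new-result>informational</new-result>
        <remark>Root login is blocked by the edge firewall</remark>
      </override>
      <result>informational</result>
    </rule-result>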
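Note: with the new counter wired into xccdf_reporter(), the evaluation
summary gains a Mitigated line. The tail of the format string is elided in
the hunk above, so sample output (counts invented) is roughly:

    --Results for 'my-benchmark' (Profile: 'default')--
    Passed:         24
    Mitigated:      2
    Failed:         5
    Fixed:          0
    Not Selected:   11
    ...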