[selinux-policy/f14/master: 3207/3230] hadoop 1/10 -- unconfined

Daniel J Walsh dwalsh at fedoraproject.org
Tue Oct 12 20:16:37 UTC 2010


commit bc71a042d84f170f736a679c5808351af9339e97
Author: Paul Nuzzi <pjnuzzi at tycho.ncsc.mil>
Date:   Tue Oct 5 15:59:29 2010 -0400

    hadoop 1/10 -- unconfined
    
    On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
    > On 10/04/10 13:15, Paul Nuzzi wrote:
    >> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
    >>> On 10/01/10 11:17, Paul Nuzzi wrote:
    >>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
    >>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
    >>>>>> I updated the patch based on recommendations from the mailing list.
    >>>>>> All of hadoop's services are included in one module instead of
    >>>>>> individual ones.  Unconfined and sysadm roles are given access to
    >>>>>> hadoop and zookeeper client domain transitions. The services are started
    >>>>>> using run_init.  Let me know what you think.
    >>>>>
    >>>>> Why do some hadoop domains need to manage generic tmp?
    >>>>>
    >>>>> files_manage_generic_tmp_dirs(zookeeper_t)
    >>>>> files_manage_generic_tmp_dirs(hadoop_t)
    >>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
    >>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
    >>>>> files_manage_generic_tmp_files(hadoop_$1_t)
    >>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
    >>>>
    >>>> This has to be done for Java JMX to work.  All of the files are written to
    >>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
    >>>> all the files for each service are labeled hadoop_*_tmp_t.  If the directory
    >>>> is not labeled tmp_t, the first service to create it will end up owning it.
    >>>
    >>> The hsperfdata dir in /tmp is certainly the bane of policy writers.  Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir.  I suggest you do something like
    >>>
    >>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
    >>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
    >>>
    >>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
    >>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
    >>>
    >>
    >> That looks like a better way to handle the tmp_t problem.
    >>
    >> I changed the patch with your comments.  Hopefully this will be one of the last updates.
    >> Tested on a CDH3 cluster as a module without any problems.
    >
    > There are several little issues with style, but it'll be easier just to fix them when it's committed.
    >
    > Other comments inline.
    >
    
    I did my best locking down the ports hadoop uses.  Unfortunately the services
    use high, randomized ports, making corenet_tcp_connect_generic_port a must-have.
    Hopefully one day hadoop will settle on static ports.  I added the
    hadoop_datanode port (50010) since it is important to lock down that service.
    I changed the patch based on the rest of the comments.
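    
    For reference, once the daemons do settle on static ports, the generic
    connect rule could be dropped in favor of the per-service port interfaces
    that this patch's network_port() declarations generate.  A minimal sketch,
    not part of the submitted policy:
    
    	# illustrative only -- would replace corenet_tcp_connect_generic_port(hadoop_$1_t)
    	corenet_tcp_connect_hadoop_namenode_port(hadoop_$1_t)
    	corenet_tcp_connect_hadoop_datanode_port(hadoop_$1_t)
    	corenet_tcp_connect_zookeeper_client_port(hadoop_$1_t)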
    
    Signed-off-by: Paul Nuzzi <pjnuzzi at tycho.ncsc.mil>

 policy/modules/kernel/corenetwork.te.in |    5 +
 policy/modules/roles/sysadm.te          |    8 +
 policy/modules/services/hadoop.fc       |   54 +++++
 policy/modules/services/hadoop.if       |  352 ++++++++++++++++++++++++++++
 policy/modules/services/hadoop.te       |  379 +++++++++++++++++++++++++++++++
 policy/modules/system/unconfined.te     |    8 +
 6 files changed, 806 insertions(+), 0 deletions(-)
---
diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
 network_port(git, tcp,9418,s0, udp,9418,s0)
 network_port(gopher, tcp,70,s0, udp,70,s0)
 network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
 network_port(hddtemp, tcp,7634,s0)
 network_port(howl, tcp,5335,s0, udp,5353,s0)
 network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
 network_port(xen, tcp,8002,s0)
 network_port(xfs, tcp,7100,s0)
 network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
 network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
 network_port(zope, tcp,8021,s0)
 
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
 	# allow system administrator to use the ipsec script to look
 	# at things (e.g., ipsec auto --status)
 	# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
 	yam_run(sysadm_t, sysadm_r)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
 ifndef(`distro_redhat',`
 	optional_policy(`
 		auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..a09275d
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,54 @@
+/etc/hadoop.*(/.*)?						gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode		--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker		--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode		--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker		--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper			--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode				--	gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker				--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode				--	gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode			--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker				--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper					--	gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)?					gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop				--	gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client				--	gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server				--	gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)?						gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)?						gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)?		gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)?		gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)?		gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)?	gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode			--	gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode			--	gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker			--	gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker			--	gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode		--	gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)?						gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)?		gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)?		gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)?		gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)?	gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)?		gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)?				gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)?					gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)?					-d	gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid	--	gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid	--	gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid	--	gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid	--	gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid	--	gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..f15e5d2
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,352 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+##	The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+##	<summary>
+##	Domain prefix to be used.
+##	</summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+	gen_require(`
+		attribute hadoop_domain;
+		type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+		type hadoop_exec_t, hadoop_hsperfdata_t;
+	')
+
+	########################################
+	#
+	# Shared declarations.
+	#
+
+	type hadoop_$1_t, hadoop_domain;
+	domain_type(hadoop_$1_t)
+	domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+	type hadoop_$1_initrc_t;
+	type hadoop_$1_initrc_exec_t;
+	init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+	role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+	type hadoop_$1_lock_t;
+	files_lock_file(hadoop_$1_lock_t)
+	files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+	type hadoop_$1_log_t;
+	logging_log_file(hadoop_$1_log_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+	filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+	type hadoop_$1_var_lib_t;
+	files_type(hadoop_$1_var_lib_t)
+	filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+	type hadoop_$1_initrc_var_run_t;
+	files_pid_file(hadoop_$1_initrc_var_run_t)
+	filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+	type hadoop_$1_tmp_t;
+	files_tmp_file(hadoop_$1_tmp_t)
+	files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+	filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+	####################################
+	#
+	# Shared hadoop_$1 initrc policy.
+	#
+
+	allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+	allow hadoop_$1_initrc_t self:capability { setuid setgid };
+	allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_initrc_t self:process setsched;
+
+	consoletype_exec(hadoop_$1_initrc_t)
+	corecmd_exec_bin(hadoop_$1_initrc_t)
+	corecmd_exec_shell(hadoop_$1_initrc_t)
+
+	domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+	dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+	files_read_etc_files(hadoop_$1_initrc_t)
+	files_read_usr_files(hadoop_$1_initrc_t)
+	files_search_pids(hadoop_$1_initrc_t)
+	files_search_locks(hadoop_$1_initrc_t)
+	fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+	hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+	init_rw_utmp(hadoop_$1_initrc_t)
+	init_use_script_ptys(hadoop_$1_initrc_t)
+
+	kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+	kernel_read_sysctl(hadoop_$1_initrc_t)
+	kernel_read_system_state(hadoop_$1_initrc_t)
+
+	logging_send_syslog_msg(hadoop_$1_initrc_t)
+	logging_send_audit_msgs(hadoop_$1_initrc_t)
+	logging_search_logs(hadoop_$1_initrc_t)
+
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+	manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+	miscfiles_read_localization(hadoop_$1_initrc_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_initrc_t)
+	')
+
+	term_use_generic_ptys(hadoop_$1_initrc_t)
+
+	userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+	####################################
+	#
+	# Shared hadoop_$1 policy.
+	#
+
+	allow hadoop_$1_t hadoop_domain:process signull;
+	allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+	allow hadoop_$1_t self:process execmem;
+	allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+	corecmd_exec_bin(hadoop_$1_t)
+	corecmd_exec_shell(hadoop_$1_t)
+
+	dev_read_rand(hadoop_$1_t)
+	dev_read_urand(hadoop_$1_t)
+	dev_read_sysfs(hadoop_$1_t)
+	dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+	files_read_etc_files(hadoop_$1_t)
+	files_search_pids(hadoop_$1_t)
+	files_search_var_lib(hadoop_$1_t)
+
+	hadoop_exec_config_files(hadoop_$1_t)
+
+	java_exec(hadoop_$1_t)
+
+	kernel_read_network_state(hadoop_$1_t)
+	kernel_read_system_state(hadoop_$1_t)
+
+	logging_search_logs(hadoop_$1_t)
+
+	manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+	manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+	miscfiles_read_localization(hadoop_$1_t)
+
+	optional_policy(`
+		nscd_socket_use(hadoop_$1_t)
+	')
+
+	sysnet_read_config(hadoop_$1_t)
+
+	allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+	corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+	corenet_all_recvfrom_netlabel(hadoop_$1_t)
+	corenet_tcp_bind_all_nodes(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+	corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+	# Hadoop uses high ordered random ports for services
+	# If permanent ports are chosen, remove line below and lock down
+	corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+	allow hadoop_$1_t self:udp_socket create_socket_perms;
+	corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+	corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+	corenet_udp_bind_all_nodes(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+	gen_require(`
+		type hadoop_t, hadoop_exec_t;
+	')
+
+	files_search_usr($1)
+	libs_search_lib($1)
+	domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute hadoop in the hadoop domain,
+##	and allow the specified role the
+##	hadoop domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+	gen_require(`
+		type hadoop_t;
+	')
+
+	hadoop_domtrans($1)
+	role $2 types hadoop_t;
+
+	allow $1 hadoop_t:process { ptrace signal_perms };
+	ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+	gen_require(`
+		type zookeeper_t, zookeeper_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper server domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+	gen_require(`
+		type zookeeper_server_t, zookeeper_server_exec_t;
+	')
+
+	corecmd_search_bin($1)
+	files_search_usr($1)
+	domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper server in the
+##	zookeeper domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+	gen_require(`
+		type zookeeper_server_initrc_exec_t;
+	')
+
+	init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+##	Execute zookeeper client in the
+##	zookeeper client domain, and allow the
+##	specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+##	<summary>
+##	Domain allowed to transition.
+##	</summary>
+## </param>
+## <param name="role">
+##	<summary>
+##	Role allowed access.
+##	</summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+	gen_require(`
+		type zookeeper_t;
+	')
+
+	hadoop_domtrans_zookeeper_client($1)
+	role $2 types zookeeper_t;
+
+	allow $1 zookeeper_t:process { ptrace signal_perms };
+	ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+##  Give permission to a domain to read
+##  hadoop_etc_t
+## </summary>
+## <param name="domain">
+##  <summary>
+##  Domain needing read permission
+##  </summary>
+## </param>
+#
+interface(`hadoop_read_config_files', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	allow $1 hadoop_etc_t:dir search_dir_perms;
+	allow $1 hadoop_etc_t:lnk_file { read getattr };
+	allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+##  Give permission to a domain to
+##  execute hadoop_etc_t
+## </summary>
+## <param name="domain">
+##  <summary>
+##  Domain needing read and execute
+##  permission
+##  </summary>
+## </param>
+#
+interface(`hadoop_exec_config_files', `
+	gen_require(`
+		type hadoop_etc_t;
+	')
+
+	hadoop_read_config_files($1)
+	allow $1 hadoop_etc_t:file { execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..587c393
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,379 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+	nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+	nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+	nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
 ')
 
 optional_policy(`
+	hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
 	inn_domtrans(unconfined_t)
 ')
 
@@ -210,6 +214,10 @@ optional_policy(`
 	xserver_domtrans(unconfined_t)
 ')
 
+optional_policy(`
+	hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
 ########################################
 #
 # Unconfined Execmem Local policy

