<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<!-- Generated by Apache Maven Doxia at 2014-02-11 -->
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Apache Hadoop 2.3.0 - Hadoop in Secure Mode</title>
<style type="text/css" media="all">
@import url("./css/maven-base.css");
@import url("./css/maven-theme.css");
@import url("./css/site.css");
</style>
<link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
<meta name="Date-Revision-yyyymmdd" content="20140211" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head>
<body class="composite">
<div id="banner">
<a href="http://hadoop.apache.org/" id="bannerLeft">
<img src="http://hadoop.apache.org/images/hadoop-logo.jpg" alt="" />
</a>
<a href="http://www.apache.org/" id="bannerRight">
<img src="http://www.apache.org/images/asf_logo_wide.png" alt="" />
</a>
<div class="clear">
<hr/>
</div>
</div>
<div id="breadcrumbs">
<div class="xleft">
<a href="http://www.apache.org/" class="externalLink">Apache</a>
&gt;
<a href="http://hadoop.apache.org/" class="externalLink">Hadoop</a>
&gt;
<a href="../">Apache Hadoop Project Dist POM</a>
&gt;
Apache Hadoop 2.3.0
</div>
<div class="xright"> <a href="http://wiki.apache.org/hadoop" class="externalLink">Wiki</a>
|
<a href="https://svn.apache.org/repos/asf/hadoop/" class="externalLink">SVN</a>
|
<a href="http://hadoop.apache.org/" class="externalLink">Apache Hadoop</a>
&nbsp;| Last Published: 2014-02-11
&nbsp;| Version: 2.3.0
</div>
<div class="clear">
<hr/>
</div>
</div>
<div id="leftColumn">
<div id="navcolumn">
<h5>General</h5>
<ul>
<li class="none">
<a href="../../index.html">Overview</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/SingleCluster.html">Single Node Setup</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/ClusterSetup.html">Cluster Setup</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/CommandsManual.html">Hadoop Commands Reference</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/FileSystemShell.html">File System Shell</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/Compatibility.html">Hadoop Compatibility</a>
</li>
</ul>
<h5>Common</h5>
<ul>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/CLIMiniCluster.html">CLI Mini Cluster</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/NativeLibraries.html">Native Libraries</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/Superusers.html">Superusers</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/SecureMode.html">Secure Mode</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/ServiceLevelAuth.html">Service Level Authorization</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/HttpAuthentication.html">HTTP Authentication</a>
</li>
</ul>
<h5>HDFS</h5>
<ul>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html">HDFS User Guide</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/HDFSHighAvailabilityWithQJM.html">High Availability With QJM</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/HDFSHighAvailabilityWithNFS.html">High Availability With NFS</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/Federation.html">Federation</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsSnapshots.html">HDFS Snapshots</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsDesign.html">HDFS Architecture</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsEditsViewer.html">Edits Viewer</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsImageViewer.html">Image Viewer</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html">Permissions and HDFS</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsQuotaAdminGuide.html">Quotas and HDFS</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/Hftp.html">HFTP</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/LibHdfs.html">C API libhdfs</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/WebHDFS.html">WebHDFS REST API</a>
</li>
<li class="none">
<a href="../../hadoop-hdfs-httpfs/index.html">HttpFS Gateway</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/ShortCircuitLocalReads.html">Short Circuit Local Reads</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/CentralizedCacheManagement.html">Centralized Cache Management</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/HdfsNfsGateway.html">HDFS NFS Gateway</a>
</li>
</ul>
<h5>MapReduce</h5>
<ul>
<li class="none">
<a href="../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html">Compatibilty between Hadoop 1.x and Hadoop 2.x</a>
</li>
<li class="none">
<a href="../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/EncryptedShuffle.html">Encrypted Shuffle</a>
</li>
<li class="none">
<a href="../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/PluggableShuffleAndPluggableSort.html">Pluggable Shuffle/Sort</a>
</li>
<li class="none">
<a href="../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/DistributedCacheDeploy.html">Distributed Cache Deploy</a>
</li>
</ul>
<h5>YARN</h5>
<ul>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/YARN.html">YARN Architecture</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/WritingYarnApplications.html">Writing YARN Applications</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html">Capacity Scheduler</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/FairScheduler.html">Fair Scheduler</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/WebApplicationProxy.html">Web Application Proxy</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/YarnCommands.html">YARN Commands</a>
</li>
<li class="none">
<a href="../../hadoop-sls/SchedulerLoadSimulator.html">Scheduler Load Simulator</a>
</li>
</ul>
<h5>YARN REST APIs</h5>
<ul>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html">Introduction</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html">Resource Manager</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html">Node Manager</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html">MR Application Master</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html">History Server</a>
</li>
</ul>
<h5>Auth</h5>
<ul>
<li class="none">
<a href="../../hadoop-auth/index.html">Overview</a>
</li>
<li class="none">
<a href="../../hadoop-auth/Examples.html">Examples</a>
</li>
<li class="none">
<a href="../../hadoop-auth/Configuration.html">Configuration</a>
</li>
<li class="none">
<a href="../../hadoop-auth/BuildingIt.html">Building</a>
</li>
</ul>
<h5>Reference</h5>
<ul>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/releasenotes.html">Release Notes</a>
</li>
<li class="none">
<a href="../../api/index.html">API docs</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/CHANGES.txt">Common CHANGES.txt</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/CHANGES.txt">HDFS CHANGES.txt</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-mapreduce/CHANGES.txt">MapReduce CHANGES.txt</a>
</li>
</ul>
<h5>Configuration</h5>
<ul>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/core-default.xml">core-default.xml</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-hdfs/hdfs-default.xml">hdfs-default.xml</a>
</li>
<li class="none">
<a href="../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml">mapred-default.xml</a>
</li>
<li class="none">
<a href="../../hadoop-yarn/hadoop-yarn-common/yarn-default.xml">yarn-default.xml</a>
</li>
<li class="none">
<a href="../../hadoop-project-dist/hadoop-common/DeprecatedProperties.html">Deprecated Properties</a>
</li>
</ul>
<a href="http://maven.apache.org/" title="Built by Maven" class="poweredBy">
<img alt="Built by Maven" src="./images/logos/maven-feather.png"/>
</a>
</div>
</div>
<div id="bodyColumn">
<div id="contentBox">
<!-- Licensed under the Apache License, Version 2.0 (the "License"); --><!-- you may not use this file except in compliance with the License. --><!-- You may obtain a copy of the License at --><!-- --><!-- http://www.apache.org/licenses/LICENSE-2.0 --><!-- --><!-- Unless required by applicable law or agreed to in writing, software --><!-- distributed under the License is distributed on an "AS IS" BASIS, --><!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --><!-- See the License for the specific language governing permissions and --><!-- limitations under the License. See accompanying LICENSE file. --><ul>
<li><a href="#Hadoop_in_Secure_Mode">Hadoop in Secure Mode</a>
<ul>
<li><a href="#Introduction">Introduction</a></li>
<li><a href="#Authentication">Authentication</a>
<ul>
<li><a href="#End_User_Accounts">End User Accounts</a></li>
<li><a href="#User_Accounts_for_Hadoop_Daemons">User Accounts for Hadoop Daemons</a></li>
<li><a href="#Kerberos_principals_for_Hadoop_Daemons_and_Users">Kerberos principals for Hadoop Daemons and Users</a></li>
<li><a href="#Mapping_from_Kerberos_principal_to_OS_user_account">Mapping from Kerberos principal to OS user account</a></li>
<li><a href="#Mapping_from_user_to_group">Mapping from user to group</a></li>
<li><a href="#Proxy_user">Proxy user</a></li>
<li><a href="#Secure_DataNode">Secure DataNode</a></li></ul></li>
<li><a href="#Data_confidentiality">Data confidentiality</a>
<ul>
<li><a href="#Data_Encryption_on_RPC">Data Encryption on RPC</a></li>
<li><a href="#Data_Encryption_on_Block_data_transfer.">Data Encryption on Block data transfer.</a></li>
<li><a href="#Data_Encryption_on_HTTP">Data Encryption on HTTP</a></li></ul></li>
<li><a href="#Configuration">Configuration</a>
<ul>
<li><a href="#Permissions_for_both_HDFS_and_local_fileSystem_paths">Permissions for both HDFS and local fileSystem paths</a></li>
<li><a href="#Common_Configurations">Common Configurations</a></li>
<li><a href="#NameNode">NameNode</a></li>
<li><a href="#Secondary_NameNode">Secondary NameNode</a></li>
<li><a href="#DataNode">DataNode</a></li>
<li><a href="#WebHDFS">WebHDFS</a></li>
<li><a href="#ResourceManager">ResourceManager</a></li>
<li><a href="#NodeManager">NodeManager</a></li>
<li><a href="#Configuration_for_WebAppProxy">Configuration for WebAppProxy</a></li>
<li><a href="#LinuxContainerExecutor">LinuxContainerExecutor</a></li>
<li><a href="#MapReduce_JobHistory_Server">MapReduce JobHistory Server</a></li></ul></li></ul></li></ul>
<div class="section">
<h2>Hadoop in Secure Mode<a name="Hadoop_in_Secure_Mode"></a></h2>
<div class="section">
<h3>Introduction<a name="Introduction"></a></h3>
<p>This document describes how to configure authentication for Hadoop in secure mode.</p>
<p>By default Hadoop runs in non-secure mode, in which no actual authentication is required. When Hadoop is configured to run in secure mode, each user and service must be authenticated by Kerberos in order to use Hadoop services.</p>
<p>Security features of Hadoop consist of <a href="#Authentication">authentication</a>, <a href="./ServiceLevelAuth.html">service level authorization</a>, <a href="./HttpAuthentication.html">authentication for Web consoles</a> and <a href="#Data_confidentiality">data confidentiality</a>.</p></div>
<div class="section">
<h3>Authentication<a name="Authentication"></a></h3>
<div class="section">
<h4>End User Accounts<a name="End_User_Accounts"></a></h4>
<p>When service level authentication is turned on, end users using Hadoop in secure mode need to be authenticated by Kerberos. The simplest way to authenticate is with the <tt>kinit</tt> command of Kerberos.</p>
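<p>For example, a user can obtain and inspect a ticket as follows (the principal name is illustrative):</p>
<div>
<pre>$ kinit alice@REALM.TLD
Password for alice@REALM.TLD:
$ klist
Ticket cache: FILE:/tmp/krb5cc_1000
Default principal: alice@REALM.TLD</pre></div></div>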
<div class="section">
<h4>User Accounts for Hadoop Daemons<a name="User_Accounts_for_Hadoop_Daemons"></a></h4>
<p>Ensure that the HDFS and YARN daemons run as different Unix users, e.g. <tt>hdfs</tt> and <tt>yarn</tt>. Also, ensure that the MapReduce JobHistory server runs as a different user, such as <tt>mapred</tt>.</p>
<p>It is recommended to have them share a Unix group, e.g. <tt>hadoop</tt>. See also &quot;<a href="#Mapping_from_user_to_group">Mapping from user to group</a>&quot; for group management.</p>
<table border="1" class="bodyTable">
<tr class="a">
<th align="left">User:Group</th>
<th align="left">Daemons</th></tr>
<tr class="b">
<td align="left">hdfs:hadoop</td>
<td align="left">NameNode, Secondary NameNode, JournalNode, DataNode</td></tr>
<tr class="a">
<td align="left">yarn:hadoop</td>
<td align="left">ResourceManager, NodeManager</td></tr>
<tr class="b">
<td align="left">mapred:hadoop</td>
<td align="left">MapReduce JobHistory Server</td></tr></table></div>
<div class="section">
<h4>Kerberos principals for Hadoop Daemons and Users<a name="Kerberos_principals_for_Hadoop_Daemons_and_Users"></a></h4>
<p>To run Hadoop service daemons in secure mode, Kerberos principals are required. Each service reads authentication information saved in a keytab file with appropriate permissions.</p>
<p>HTTP web-consoles should be served by a principal different from the one used for RPC.</p>
<p>The subsections below show examples of credentials for Hadoop services.</p>
<div class="section">
<h5>HDFS<a name="HDFS"></a></h5>
<p>The NameNode keytab file, on the NameNode host, should look like the following:</p>
<div>
<pre>$ klist -e -k -t /etc/security/keytab/nn.service.keytab
Keytab name: FILE:/etc/security/keytab/nn.service.keytab
KVNO Timestamp Principal
4 07/18/11 21:08:09 nn/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 nn/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 nn/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)</pre></div>
<p>The Secondary NameNode keytab file, on that host, should look like the following:</p>
<div>
<pre>$ klist -e -k -t /etc/security/keytab/sn.service.keytab
Keytab name: FILE:/etc/security/keytab/sn.service.keytab
KVNO Timestamp Principal
4 07/18/11 21:08:09 sn/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 sn/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 sn/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)</pre></div>
<p>The DataNode keytab file, on each host, should look like the following:</p>
<div>
<pre>$ klist -e -k -t /etc/security/keytab/dn.service.keytab
Keytab name: FILE:/etc/security/keytab/dn.service.keytab
KVNO Timestamp Principal
4 07/18/11 21:08:09 dn/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 dn/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 dn/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)</pre></div></div>
<div class="section">
<h5>YARN<a name="YARN"></a></h5>
<p>The ResourceManager keytab file, on the ResourceManager host, should look like the following:</p>
<div>
<pre>$ klist -e -k -t /etc/security/keytab/rm.service.keytab
Keytab name: FILE:/etc/security/keytab/rm.service.keytab
KVNO Timestamp Principal
4 07/18/11 21:08:09 rm/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 rm/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 rm/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)</pre></div>
<p>The NodeManager keytab file, on each host, should look like the following:</p>
<div>
<pre>$ klist -e -k -t /etc/security/keytab/nm.service.keytab
Keytab name: FILE:/etc/security/keytab/nm.service.keytab
KVNO Timestamp Principal
4 07/18/11 21:08:09 nm/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 nm/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 nm/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)</pre></div></div>
<div class="section">
<h5>MapReduce JobHistory Server<a name="MapReduce_JobHistory_Server"></a></h5>
<p>The MapReduce JobHistory Server keytab file, on that host, should look like the following:</p>
<div>
<pre>$ klist -e -k -t /etc/security/keytab/jhs.service.keytab
Keytab name: FILE:/etc/security/keytab/jhs.service.keytab
KVNO Timestamp Principal
4 07/18/11 21:08:09 jhs/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 jhs/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 jhs/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-256 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (AES-128 CTS mode with 96-bit SHA-1 HMAC)
4 07/18/11 21:08:09 host/full.qualified.domain.name@REALM.TLD (ArcFour with HMAC/md5)</pre></div></div></div>
<div class="section">
<h4>Mapping from Kerberos principal to OS user account<a name="Mapping_from_Kerberos_principal_to_OS_user_account"></a></h4>
<p>Hadoop maps a Kerberos principal to an OS user account using the rules specified by <tt>hadoop.security.auth_to_local</tt>, which work in the same way as <tt>auth_to_local</tt> in the <a class="externalLink" href="http://web.mit.edu/Kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html">Kerberos configuration file (krb5.conf)</a>.</p>
<p>By default, it picks the first component of the principal name as the user name if the realm matches the <tt>default_realm</tt> (usually defined in /etc/krb5.conf). For example, <tt>host/full.qualified.domain.name@REALM.TLD</tt> is mapped to <tt>host</tt> by the default rule.</p>
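<p>Custom rules can map the service principals shown above to the Unix accounts of the daemon users. A minimal sketch, assuming the realm <tt>REALM.TLD</tt> and the daemon accounts from the table above:</p>
<div>
<pre>&lt;property&gt;
  &lt;name&gt;hadoop.security.auth_to_local&lt;/name&gt;
  &lt;value&gt;
    RULE:[2:$1/$2@$0]([nsd]n/.*@REALM\.TLD)s/.*/hdfs/
    RULE:[2:$1/$2@$0]([rn]m/.*@REALM\.TLD)s/.*/yarn/
    RULE:[2:$1/$2@$0](jhs/.*@REALM\.TLD)s/.*/mapred/
    DEFAULT
  &lt;/value&gt;
&lt;/property&gt;</pre></div></div>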
<div class="section">
<h4>Mapping from user to group<a name="Mapping_from_user_to_group"></a></h4>
<p>Though files on HDFS are associated with an owner and a group, Hadoop itself does not define groups. Mapping from user to group is done by the OS or LDAP.</p>
<p>You can change the way of mapping by specifying the name of a mapping provider as the value of <tt>hadoop.security.group.mapping</tt>. See the <a href="../hadoop-hdfs/HdfsPermissionsGuide.html">HDFS Permissions Guide</a> for details.</p>
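<p>For example, to resolve groups against an LDAP server instead of the OS, one might set the following (a sketch; <tt>org.apache.hadoop.security.LdapGroupsMapping</tt> also requires its <tt>hadoop.security.group.mapping.ldap.*</tt> connection properties to be configured):</p>
<div>
<pre>&lt;property&gt;
  &lt;name&gt;hadoop.security.group.mapping&lt;/name&gt;
  &lt;value&gt;org.apache.hadoop.security.LdapGroupsMapping&lt;/value&gt;
&lt;/property&gt;</pre></div>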
<p>In practice, you need to manage an SSO environment using Kerberos with LDAP for Hadoop in secure mode.</p></div>
<div class="section">
<h4>Proxy user<a name="Proxy_user"></a></h4>
<p>Some products, such as Apache Oozie, access Hadoop services on behalf of end users and need to be able to impersonate them. You can configure such a proxy user using the properties <tt>hadoop.proxyuser.$superuser.hosts</tt> and <tt>hadoop.proxyuser.$superuser.groups</tt>.</p>
<p>For example, by specifying the following in core-site.xml, the user named <tt>oozie</tt> accessing from any host can impersonate any user belonging to any group.</p>
<div>
<pre>&lt;property&gt;
  &lt;name&gt;hadoop.proxyuser.oozie.hosts&lt;/name&gt;
  &lt;value&gt;*&lt;/value&gt;
&lt;/property&gt;
&lt;property&gt;
  &lt;name&gt;hadoop.proxyuser.oozie.groups&lt;/name&gt;
  &lt;value&gt;*&lt;/value&gt;
&lt;/property&gt;</pre></div></div>
<div class="section">
<h4>Secure DataNode<a name="Secure_DataNode"></a></h4>
<p>Because the data transfer protocol of the DataNode does not use Hadoop's RPC framework, DataNodes must authenticate themselves by using privileged ports, which are specified by <tt>dfs.datanode.address</tt> and <tt>dfs.datanode.http.address</tt>. This authentication is based on the assumption that the attacker won't be able to get root privileges.</p>
<p>When you execute the <tt>hdfs datanode</tt> command as root, the server process binds the privileged ports first, then drops privileges and runs as the user account specified by <tt>HADOOP_SECURE_DN_USER</tt>. This startup process uses jsvc installed at <tt>JSVC_HOME</tt>. You must specify <tt>HADOOP_SECURE_DN_USER</tt> and <tt>JSVC_HOME</tt> as environment variables at startup (in hadoop-env.sh).</p>
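<p>For example, hadoop-env.sh might contain (the jsvc location is illustrative):</p>
<div>
<pre>export HADOOP_SECURE_DN_USER=hdfs
export JSVC_HOME=/usr/lib/bigtop-utils</pre></div></div></div>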
<div class="section">
<h3>Data confidentiality<a name="Data_confidentiality"></a></h3>
<div class="section">
<h4>Data Encryption on RPC<a name="Data_Encryption_on_RPC"></a></h4>
<p>Data transferred between Hadoop services and clients can be encrypted on the wire. Setting <tt>hadoop.rpc.protection</tt> to <tt>&quot;privacy&quot;</tt> in core-site.xml activates data encryption.</p>
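<p>For example, in core-site.xml:</p>
<div>
<pre>&lt;property&gt;
  &lt;name&gt;hadoop.rpc.protection&lt;/name&gt;
  &lt;value&gt;privacy&lt;/value&gt;
&lt;/property&gt;</pre></div></div>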
<div class="section">
<h4>Data Encryption on Block data transfer.<a name="Data_Encryption_on_Block_data_transfer."></a></h4>
<p>You need to set <tt>dfs.encrypt.data.transfer</tt> to <tt>&quot;true&quot;</tt> in hdfs-site.xml in order to activate data encryption for the data transfer protocol of the DataNode.</p>
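<p>For example, in hdfs-site.xml:</p>
<div>
<pre>&lt;property&gt;
  &lt;name&gt;dfs.encrypt.data.transfer&lt;/name&gt;
  &lt;value&gt;true&lt;/value&gt;
&lt;/property&gt;</pre></div></div>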
<div class="section">
<h4>Data Encryption on HTTP<a name="Data_Encryption_on_HTTP"></a></h4>
<p>Data transfer between web consoles and clients is protected by using SSL (HTTPS).</p></div></div>
<div class="section">
<h3>Configuration<a name="Configuration"></a></h3>
<div class="section">
<h4>Permissions for both HDFS and local fileSystem paths<a name="Permissions_for_both_HDFS_and_local_fileSystem_paths"></a></h4>
<p>The following table lists various paths on HDFS and local filesystems (on all nodes) and recommended permissions:</p>
<table border="1" class="bodyTable">
<tr class="a">
<th align="left">Filesystem</th>
<th align="left">Path</th>
<th align="left">User:Group</th>
<th align="left">Permissions</th></tr>
<tr class="b">
<td align="left">local</td>
<td align="left"><tt>dfs.namenode.name.dir</tt></td>
<td align="left">hdfs:hadoop</td>
<td align="left">drwx------</td></tr>
<tr class="a">
<td align="left">local</td>
<td align="left"><tt>dfs.datanode.data.dir</tt></td>
<td align="left">hdfs:hadoop</td>
<td align="left">drwx------</td></tr>
<tr class="b">
<td align="left">local</td>
<td align="left">$HADOOP_LOG_DIR</td>
<td align="left">hdfs:hadoop</td>
<td align="left">drwxrwxr-x</td></tr>
<tr class="a">
<td align="left">local</td>
<td align="left">$YARN_LOG_DIR</td>
<td align="left">yarn:hadoop</td>
<td align="left">drwxrwxr-x</td></tr>
<tr class="b">
<td align="left">local</td>
<td align="left"><tt>yarn.nodemanager.local-dirs</tt></td>
<td align="left">yarn:hadoop</td>
<td align="left">drwxr-xr-x</td></tr>
<tr class="a">
<td align="left">local</td>
<td align="left"><tt>yarn.nodemanager.log-dirs</tt></td>
<td align="left">yarn:hadoop</td>
<td align="left">drwxr-xr-x</td></tr>
<tr class="b">
<td align="left">local</td>
<td align="left">container-executor</td>
<td align="left">root:hadoop</td>
<td align="left">--Sr-s---</td></tr>
<tr class="a">
<td align="left">local</td>
<td align="left"><tt>conf/container-executor.cfg</tt></td>
<td align="left">root:hadoop</td>
<td align="left">r--------</td></tr>
<tr class="b">
<td align="left">hdfs</td>
<td align="left">/</td>
<td align="left">hdfs:hadoop</td>
<td align="left">drwxr-xr-x</td></tr>
<tr class="a">
<td align="left">hdfs</td>
<td align="left">/tmp</td>
<td align="left">hdfs:hadoop</td>
<td align="left">drwxrwxrwxt</td></tr>
<tr class="b">
<td align="left">hdfs</td>
<td align="left">/user</td>
<td align="left">hdfs:hadoop</td>
<td align="left">drwxr-xr-x</td></tr>
<tr class="a">
<td align="left">hdfs</td>
<td align="left"><tt>yarn.nodemanager.remote-app-log-dir</tt></td>
<td align="left">yarn:hadoop</td>
<td align="left">drwxrwxrwxt</td></tr>
<tr class="b">
<td align="left">hdfs</td>
<td align="left"><tt>mapreduce.jobhistory.intermediate-done-dir</tt></td>
<td align="left">mapred:hadoop</td>
<td align="left">drwxrwxrwxt</td></tr>
<tr class="a">
<td align="left">hdfs</td>
<td align="left"><tt>mapreduce.jobhistory.done-dir</tt></td>
<td align="left">mapred:hadoop</td>
<td align="left">drwxr-x---</td></tr></table></div>
<div class="section">
<h4>Common Configurations<a name="Common_Configurations"></a></h4>
<p>In order to turn on RPC authentication in Hadoop, set the value of the <tt>hadoop.security.authentication</tt> property to <tt>&quot;kerberos&quot;</tt>, and set the security-related settings listed below appropriately.</p>
<p>The following properties should be in the <tt>core-site.xml</tt> of all the nodes in the cluster.</p>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/core-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>hadoop.security.authentication</tt></td>
<td align="left"><i>kerberos</i></td>
<td align="left"><tt>simple</tt> : No authentication. (default) &#xa0;<br /><tt>kerberos</tt> : Enable authentication by Kerberos.</td></tr>
<tr class="a">
<td align="left"><tt>hadoop.security.authorization</tt></td>
<td align="left"><i>true</i></td>
<td align="left">Enable <a href="./ServiceLevelAuth.html">RPC service-level authorization</a>.</td></tr>
<tr class="b">
<td align="left"><tt>hadoop.rpc.protection</tt></td>
<td align="left"><i>authentication</i></td>
<td align="left"><i>authentication</i> : authentication only (default) &#xa0;<br /><i>integrity</i> : integrity check in addition to authentication &#xa0;<br /><i>privacy</i> : data encryption in addition to integrity</td></tr>
<tr class="a">
<td align="left"><tt>hadoop.security.auth_to_local</tt></td>
<td align="left"><tt>RULE:</tt><i>exp1</i>&#xa0;<br /><tt>RULE:</tt><i>exp2</i>&#xa0;<br /><i>...</i>&#xa0;<br />DEFAULT</td>
<td align="left">The value is string containing new line characters. See <a class="externalLink" href="http://web.mit.edu/Kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html">Kerberos documentation</a> for format for <i>exp</i>.</td></tr>
<tr class="b">
<td align="left"><tt>hadoop.proxyuser.</tt><i>superuser</i><tt>.hosts</tt></td>
<td align="left"></td>
<td align="left">comma separated hosts from which <i>superuser</i> access are allowd to impersonation. <tt>*</tt> means wildcard.</td></tr>
<tr class="a">
<td align="left"><tt>hadoop.proxyuser.</tt><i>superuser</i><tt>.groups</tt></td>
<td align="left"></td>
<td align="left">comma separated groups to which users impersonated by <i>superuser</i> belongs. <tt>*</tt> means wildcard.</td></tr></table></div>
<div class="section">
<h4>NameNode<a name="NameNode"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/hdfs-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>dfs.block.access.token.enable</tt></td>
<td align="left"><i>true</i></td>
<td align="left">Enable HDFS block access tokens for secure operations.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.https.enable</tt></td>
<td align="left"><i>true</i></td>
<td align="left">This value is deprecated. Use dfs.http.policy</td></tr>
<tr class="b">
<td align="left"><tt>dfs.http.policy</tt></td>
<td align="left"><i>HTTP_ONLY</i> or <i>HTTPS_ONLY</i> or <i>HTTP_AND_HTTPS</i></td>
<td align="left">HTTPS_ONLY turns off http access. This option takes precedence over the deprecated configuration dfs.https.enable and hadoop.ssl.enabled.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.namenode.https-address</tt></td>
<td align="left"><i>nn_host_fqdn:50470</i></td>
<td align="left"></td></tr>
<tr class="b">
<td align="left"><tt>dfs.https.port</tt></td>
<td align="left"><i>50470</i></td>
<td align="left"></td></tr>
<tr class="a">
<td align="left"><tt>dfs.namenode.keytab.file</tt></td>
<td align="left"><i>/etc/security/keytab/nn.service.keytab</i></td>
<td align="left">Kerberos keytab file for the NameNode.</td></tr>
<tr class="b">
<td align="left"><tt>dfs.namenode.kerberos.principal</tt></td>
<td align="left">nn/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the NameNode.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.namenode.kerberos.https.principal</tt></td>
<td align="left">host/_HOST@REALM.TLD</td>
<td align="left">HTTPS Kerberos principal name for the NameNode.</td></tr></table></div>
<div class="section">
<h4>Secondary NameNode<a name="Secondary_NameNode"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/hdfs-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>dfs.namenode.secondary.http-address</tt></td>
<td align="left"><i>c_nn_host_fqdn:50090</i></td>
<td align="left"></td></tr>
<tr class="a">
<td align="left"><tt>dfs.namenode.secondary.https-port</tt></td>
<td align="left"><i>50470</i></td>
<td align="left"></td></tr>
<tr class="b">
<td align="left"><tt>dfs.namenode.secondary.keytab.file</tt></td>
<td align="left"><i>/etc/security/keytab/sn.service.keytab</i></td>
<td align="left">Kerberos keytab file for the NameNode.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.namenode.secondary.kerberos.principal</tt></td>
<td align="left">sn/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the Secondary NameNode.</td></tr>
<tr class="b">
<td align="left"><tt>dfs.namenode.secondary.kerberos.https.principal</tt></td>
<td align="left">host/_HOST@REALM.TLD</td>
<td align="left">HTTPS Kerberos principal name for the Secondary NameNode.</td></tr></table></div>
<div class="section">
<h4>DataNode<a name="DataNode"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/hdfs-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>dfs.datanode.data.dir.perm</tt></td>
<td align="left">700</td>
<td align="left"></td></tr>
<tr class="a">
<td align="left"><tt>dfs.datanode.address</tt></td>
<td align="left"><i>0.0.0.0:1004</i></td>
<td align="left">Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc.</td></tr>
<tr class="b">
<td align="left"><tt>dfs.datanode.http.address</tt></td>
<td align="left"><i>0.0.0.0:1006</i></td>
<td align="left">Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.datanode.https.address</tt></td>
<td align="left"><i>0.0.0.0:50470</i></td>
<td align="left"></td></tr>
<tr class="b">
<td align="left"><tt>dfs.datanode.keytab.file</tt></td>
<td align="left"><i>/etc/security/keytab/dn.service.keytab</i></td>
<td align="left">Kerberos keytab file for the DataNode.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.datanode.kerberos.principal</tt></td>
<td align="left">dn/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the DataNode.</td></tr>
<tr class="b">
<td align="left"><tt>dfs.datanode.kerberos.https.principal</tt></td>
<td align="left">host/_HOST@REALM.TLD</td>
<td align="left">HTTPS Kerberos principal name for the DataNode.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.encrypt.data.transfer</tt></td>
<td align="left"><i>false</i></td>
<td align="left">set to <tt>true</tt> when using data encryption</td></tr></table></div>
<div class="section">
<h4>WebHDFS<a name="WebHDFS"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/hdfs-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>dfs.webhdfs.enabled</tt></td>
<td align="left">http/_HOST@REALM.TLD</td>
<td align="left">Enable security on WebHDFS.</td></tr>
<tr class="a">
<td align="left"><tt>dfs.web.authentication.kerberos.principal</tt></td>
<td align="left">http/_HOST@REALM.TLD</td>
<td align="left">Kerberos keytab file for the WebHDFS.</td></tr>
<tr class="b">
<td align="left"><tt>dfs.web.authentication.kerberos.keytab</tt></td>
<td align="left"><i>/etc/security/keytab/http.service.keytab</i></td>
<td align="left">Kerberos principal name for WebHDFS.</td></tr></table></div>
<div class="section">
<h4>ResourceManager<a name="ResourceManager"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/yarn-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>yarn.resourcemanager.keytab</tt></td>
<td align="left"><i>/etc/security/keytab/rm.service.keytab</i></td>
<td align="left">Kerberos keytab file for the ResourceManager.</td></tr>
<tr class="a">
<td align="left"><tt>yarn.resourcemanager.principal</tt></td>
<td align="left">rm/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the ResourceManager.</td></tr></table></div>
<div class="section">
<h4>NodeManager<a name="NodeManager"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/yarn-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>yarn.nodemanager.keytab</tt></td>
<td align="left"><i>/etc/security/keytab/nm.service.keytab</i></td>
<td align="left">Kerberos keytab file for the NodeManager.</td></tr>
<tr class="a">
<td align="left"><tt>yarn.nodemanager.principal</tt></td>
<td align="left">nm/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the NodeManager.</td></tr>
<tr class="b">
<td align="left"><tt>yarn.nodemanager.container-executor.class</tt></td>
<td align="left"><tt>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</tt></td>
<td align="left">Use LinuxContainerExecutor.</td></tr>
<tr class="a">
<td align="left"><tt>yarn.nodemanager.linux-container-executor.group</tt></td>
<td align="left"><i>hadoop</i></td>
<td align="left">Unix group of the NodeManager.</td></tr>
<tr class="b">
<td align="left"><tt>yarn.nodemanager.linux-container-executor.path</tt></td>
<td align="left"><i>/path/to/bin/container-executor</i></td>
<td align="left">The path to the executable of Linux container executor.</td></tr></table></div>
<div class="section">
<h4>Configuration for WebAppProxy<a name="Configuration_for_WebAppProxy"></a></h4>
<p>The <tt>WebAppProxy</tt> provides a proxy between the web applications exported by an application and an end user. If security is enabled it will warn users before accessing a potentially unsafe web application. Authentication and authorization using the proxy is handled just like any other privileged web application.</p>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/yarn-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>yarn.web-proxy.address</tt></td>
<td align="left"><tt>WebAppProxy</tt> host:port for proxy to AM web apps.</td>
<td align="left"><i>host:port</i> if this is the same as <tt>yarn.resourcemanager.webapp.address</tt> or it is not defined then the <tt>ResourceManager</tt> will run the proxy otherwise a standalone proxy server will need to be launched.</td></tr>
<tr class="a">
<td align="left"><tt>yarn.web-proxy.keytab</tt></td>
<td align="left"><i>/etc/security/keytab/web-app.service.keytab</i></td>
<td align="left">Kerberos keytab file for the WebAppProxy.</td></tr>
<tr class="b">
<td align="left"><tt>yarn.web-proxy.principal</tt></td>
<td align="left">wap/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the WebAppProxy.</td></tr></table></div>
<div class="section">
<h4>LinuxContainerExecutor<a name="LinuxContainerExecutor"></a></h4>
<p>A <tt>ContainerExecutor</tt> is used by the YARN framework to define how containers are launched and controlled.</p>
<p>The following container executors are available in Hadoop YARN:</p>
<table border="1" class="bodyTable">
<tr class="a">
<th align="left">ContainerExecutor</th>
<th align="left">Description</th></tr>
<tr class="b">
<td align="left"><tt>DefaultContainerExecutor</tt></td>
<td align="left">The default executor which YARN uses to manage container execution. The container process has the same Unix user as the NodeManager.</td></tr>
<tr class="a">
<td align="left"><tt>LinuxContainerExecutor</tt></td>
<td align="left">Supported only on GNU/Linux, this executor runs the containers as either the YARN user who submitted the application (when full security is enabled) or as a dedicated user (defaults to nobody) when full security is not enabled. When full security is enabled, this executor requires all user accounts to be created on the cluster nodes where the containers are launched. It uses a <i>setuid</i> executable that is included in the Hadoop distribution. The NodeManager uses this executable to launch and kill containers. The setuid executable switches to the user who has submitted the application and launches or kills the containers. For maximum security, this executor sets up restricted permissions and user/group ownership of local files and directories used by the containers such as the shared objects, jars, intermediate files, log files etc. Particularly note that, because of this, except the application owner and NodeManager, no other user can access any of the local files/directories including those localized as part of the distributed cache.</td></tr></table>
<p>To build the LinuxContainerExecutor executable run:</p>
<div>
<pre> $ mvn package -Dcontainer-executor.conf.dir=/etc/hadoop/</pre></div>
<p>The path passed in <tt>-Dcontainer-executor.conf.dir</tt> should be the path on the cluster nodes where a configuration file for the setuid executable should be located. The executable should be installed in $HADOOP_YARN_HOME/bin.</p>
<p>The executable must have specific permissions: 6050 or --Sr-s--- permissions, user-owned by <i>root</i> (super-user) and group-owned by a special group (e.g. <tt>hadoop</tt>) of which the NodeManager Unix user is a member and no ordinary application user is. If any application user belongs to this special group, security will be compromised. This special group name should be specified for the configuration property <tt>yarn.nodemanager.linux-container-executor.group</tt> in both <tt>conf/yarn-site.xml</tt> and <tt>conf/container-executor.cfg</tt>.</p>
<p>For example, suppose the NodeManager is run as user <i>yarn</i>, who is part of the groups <i>users</i> and <i>hadoop</i>, either of which may be its primary group. Suppose also that <i>users</i> has both <i>yarn</i> and another user (an application submitter) <i>alice</i> as its members, and that <i>alice</i> does not belong to <i>hadoop</i>. Going by the above description, the setuid/setgid executable should be set 6050 or --Sr-s--- with user-owner as <i>root</i> and group-owner as <i>hadoop</i>, which has <i>yarn</i> as its member (and not <i>users</i>, which has <i>alice</i> as a member besides <i>yarn</i>).</p>
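<p>Following the table above, the ownership and permissions could be set as follows (illustrative commands):</p>
<div>
<pre>$ chown root:hadoop $HADOOP_YARN_HOME/bin/container-executor
$ chmod 6050 $HADOOP_YARN_HOME/bin/container-executor</pre></div>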
<p>The LinuxContainerExecutor requires that the paths leading up to and including the directories specified in <tt>yarn.nodemanager.local-dirs</tt> and <tt>yarn.nodemanager.log-dirs</tt> be set to 755 permissions, as described above in the table on permissions on directories.</p>
<ul>
<li><tt>conf/container-executor.cfg</tt></li></ul>
<p>The executable requires a configuration file called <tt>container-executor.cfg</tt> to be present in the configuration directory passed to the mvn target mentioned above.</p>
<p>The configuration file must be owned by the user running NodeManager (user <tt>yarn</tt> in the above example), group-owned by anyone and should have the permissions 0400 or r--------.</p>
<p>The executable requires the following configuration items to be present in the <tt>conf/container-executor.cfg</tt> file. The items should be specified as simple key=value pairs, one per line:</p>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/container-executor.cfg</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>yarn.nodemanager.linux-container-executor.group</tt></td>
<td align="left"><i>hadoop</i></td>
<td align="left">Unix group of the NodeManager. The group owner of the <i>container-executor</i> binary should be this group. Should be same as the value with which the NodeManager is configured. This configuration is required for validating the secure access of the <i>container-executor</i> binary.</td></tr>
<tr class="a">
<td align="left"><tt>banned.users</tt></td>
<td align="left">hfds,yarn,mapred,bin</td>
<td align="left">Banned users.</td></tr>
<tr class="b">
<td align="left"><tt>allowed.system.users</tt></td>
<td align="left">foo,bar</td>
<td align="left">Allowed system users.</td></tr>
<tr class="a">
<td align="left"><tt>min.user.id</tt></td>
<td align="left">1000</td>
<td align="left">Prevent other super-users.</td></tr></table>
<p>To recap, here are the local file-system permissions required for the various paths related to the <tt>LinuxContainerExecutor</tt>:</p>
<table border="1" class="bodyTable">
<tr class="a">
<th align="left">Filesystem</th>
<th align="left">Path</th>
<th align="left">User:Group</th>
<th align="left">Permissions</th></tr>
<tr class="b">
<td align="left">local</td>
<td align="left">container-executor</td>
<td align="left">root:hadoop</td>
<td align="left">--Sr-s---</td></tr>
<tr class="a">
<td align="left">local</td>
<td align="left"><tt>conf/container-executor.cfg</tt></td>
<td align="left">root:hadoop</td>
<td align="left">r--------</td></tr>
<tr class="b">
<td align="left">local</td>
<td align="left"><tt>yarn.nodemanager.local-dirs</tt></td>
<td align="left">yarn:hadoop</td>
<td align="left">drwxr-xr-x</td></tr>
<tr class="a">
<td align="left">local</td>
<td align="left"><tt>yarn.nodemanager.log-dirs</tt></td>
<td align="left">yarn:hadoop</td>
<td align="left">drwxr-xr-x</td></tr></table></div>
<div class="section">
<h4>MapReduce JobHistory Server<a name="MapReduce_JobHistory_Server"></a></h4>
<table border="1" class="bodyTable"><caption>Configuration for <tt>conf/mapred-site.xml</tt>
</caption>
<tr class="a">
<th align="left">Parameter</th>
<th align="left">Value</th>
<th align="left">Notes</th></tr>
<tr class="b">
<td align="left"><tt>mapreduce.jobhistory.address</tt></td>
<td align="left">MapReduce JobHistory Server <i>host:port</i></td>
<td align="left">Default port is 10020.</td></tr>
<tr class="a">
<td align="left"><tt>mapreduce.jobhistory.keytab</tt></td>
<td align="left"><i>/etc/security/keytab/jhs.service.keytab</i></td>
<td align="left">Kerberos keytab file for the MapReduce JobHistory Server.</td></tr>
<tr class="b">
<td align="left"><tt>mapreduce.jobhistory.principal</tt></td>
<td align="left">jhs/_HOST@REALM.TLD</td>
<td align="left">Kerberos principal name for the MapReduce JobHistory Server.</td></tr></table></div></div></div>
</div>
</div>
<div class="clear">
<hr/>
</div>
<div id="footer">
<div class="xright">&#169; 2014
Apache Software Foundation
- <a href="http://maven.apache.org/privacy-policy.html">Privacy Policy</a></div>
<div class="clear">
<hr/>
</div>
</div>
</body>
</html>
