Page Menu
Home
c4science
Search
Configure Global Search
Log In
Files
F102952544
start-dfs.sh
No One
Temporary
Actions
Download File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Subscribers
None
File Metadata
Details
File Info
Storage
Attached
Created
Tue, Feb 25, 19:14
Size
3 KB
Mime Type
text/x-shellscript
Expires
Thu, Feb 27, 19:14 (1 d, 23 h)
Engine
blob
Format
Raw Data
Handle
24350608
Attached To
R3704 elastic-yarn
start-dfs.sh
View Options
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on master node.
# Usage string, printed when an unrecognized option is passed (see argument
# parsing below).
usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"

# Resolve the absolute directory containing this script.
# $(...) replaces the legacy backticks: it nests cleanly and is easier to read.
bin=$(dirname "${BASH_SOURCE-$0}")
bin=$(cd "$bin"; pwd)

# Locate and source the shared HDFS configuration helper.
# HADOOP_LIBEXEC_DIR may be preset by the caller; otherwise fall back to the
# libexec directory shipped next to this script.
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
# Quoted so an installation path containing spaces does not word-split.
. "$HADOOP_LIBEXEC_DIR/hdfs-config.sh"
# get arguments
# An optional first argument of -upgrade or -rollback is forwarded to the
# namenode; -rollback is additionally forwarded to the datanodes.
if [ $# -ge 1 ]; then
  nameStartOpt="$1"
  shift
  case "$nameStartOpt" in
    (-upgrade)
      # Upgrade applies to the namenode only; nothing extra to record.
      ;;
    (-rollback)
      # Datanodes must also roll back their state.
      dataStartOpt="$nameStartOpt"
      ;;
    (*)
      # Unknown option: print usage to stderr (quoted so the message is not
      # word-split or glob-expanded) and fail.
      echo "$usage" >&2
      exit 1
      ;;
  esac
fi

# Add other possible options (e.g. -clusterId).
# "$*" joins the remaining arguments into a single word; the previous
# "$nameStartOpt $@" embedded $@ inside a larger quoted word, whose
# expansion is ill-defined.
nameStartOpt="$nameStartOpt $*"
#---------------------------------------------------------
# namenodes

# Ask the hdfs tool which hosts run a namenode.
# $HADOOP_PREFIX is quoted so a prefix containing spaces does not word-split.
NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)

echo "Starting namenodes on [$NAMENODES]"

# Launch the namenode daemon on every namenode host.
# $nameStartOpt is deliberately unquoted: it may carry several
# whitespace-separated options (e.g. "-upgrade -clusterId x").
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
  --config "$HADOOP_CONF_DIR" \
  --hostnames "$NAMENODES" \
  --script "$bin/hdfs" start namenode $nameStartOpt
#---------------------------------------------------------
# datanodes (using default slaves file)

# Secure (Kerberized) datanodes must be started as root by a separate
# script, so skip them here when a secure DN user is configured.
if [[ -n "$HADOOP_SECURE_DN_USER" ]]; then
  echo "Attempting to start secure cluster, skipping datanodes. " \
       "Run start-secure-dns.sh as root to complete startup."
else
  # $dataStartOpt stays unquoted on purpose: it may expand to zero or
  # more whitespace-separated options (e.g. -rollback).
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
      --config "$HADOOP_CONF_DIR" \
      --script "$bin/hdfs" start datanode $dataStartOpt
fi
#---------------------------------------------------------
# secondary namenodes (if any)

# Ask the hdfs tool which hosts run a secondary namenode; errors are
# discarded because the config may legitimately define none.
# $HADOOP_PREFIX is quoted so a prefix containing spaces does not word-split.
SECONDARY_NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

if [ -n "$SECONDARY_NAMENODES" ]; then
  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
      --config "$HADOOP_CONF_DIR" \
      --hostnames "$SECONDARY_NAMENODES" \
      --script "$bin/hdfs" start secondarynamenode
fi
#---------------------------------------------------------
# quorumjournal nodes (if any)

# Read the shared edits dir for quorum-journal HA setups.
# Errors are sent to /dev/null instead of the original 2>&- : closing
# stderr outright can make the child command itself fail when it tries to
# write a diagnostic, and /dev/null matches the secondary-namenode lookup.
# $HADOOP_PREFIX is quoted so a prefix containing spaces does not word-split.
SHARED_EDITS_DIR=$("$HADOOP_PREFIX/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>/dev/null)

case "$SHARED_EDITS_DIR" in
qjournal://*)
  # Turn "qjournal://host1:port;host2:port/journalId" into "host1 host2":
  # strip the scheme and journal id, split on ';', drop the port numbers.
  JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
  echo "Starting journal nodes [$JOURNAL_NODES]"
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
      --config "$HADOOP_CONF_DIR" \
      --hostnames "$JOURNAL_NODES" \
      --script "$bin/hdfs" start journalnode ;;
esac
#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled

# $HADOOP_PREFIX is quoted so a prefix containing spaces does not word-split.
AUTOHA_ENABLED=$("$HADOOP_PREFIX/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled)
# Compare case-insensitively: the config value may be "True", "TRUE", etc.
if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$NAMENODES" \
    --script "$bin/hdfs" start zkfc
fi
# eof
Event Timeline
Log In to Comment