diff --git a/hadoop-3.4.0/bin/gmock.pdb b/hadoop-3.4.0/bin/gmock.pdb new file mode 100644 index 0000000..c5c6570 Binary files /dev/null and b/hadoop-3.4.0/bin/gmock.pdb differ diff --git a/hadoop-3.4.0/bin/gmock_main.pdb b/hadoop-3.4.0/bin/gmock_main.pdb new file mode 100644 index 0000000..8dd63a3 Binary files /dev/null and b/hadoop-3.4.0/bin/gmock_main.pdb differ diff --git a/hadoop-3.4.0/bin/gtest.pdb b/hadoop-3.4.0/bin/gtest.pdb new file mode 100644 index 0000000..a8d9a99 Binary files /dev/null and b/hadoop-3.4.0/bin/gtest.pdb differ diff --git a/hadoop-3.4.0/bin/gtest_main.pdb b/hadoop-3.4.0/bin/gtest_main.pdb new file mode 100644 index 0000000..fd836c4 Binary files /dev/null and b/hadoop-3.4.0/bin/gtest_main.pdb differ diff --git a/hadoop-3.4.0/bin/hadoop b/hadoop-3.4.0/bin/hadoop new file mode 100644 index 0000000..1218d22 --- /dev/null +++ b/hadoop-3.4.0/bin/hadoop @@ -0,0 +1,244 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The name of the script being executed. +HADOOP_SHELL_EXECNAME="hadoop" +MYNAME="${BASH_SOURCE-$0}" + +## @description build up the hadoop command's usage text. +## @audience public +## @stability stable +## @replaceable no +function hadoop_usage +{ + hadoop_add_option "buildpaths" "attempt to add class files from build tree" + hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in worker mode" + hadoop_add_option "loglevel level" "set the log4j level for this command" + hadoop_add_option "hosts filename" "list of hosts to use in worker mode" + hadoop_add_option "workers" "turn on worker mode" + + hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability" + hadoop_add_subcommand "classpath" client "prints the class path needed to get the Hadoop jar and the required libraries" + hadoop_add_subcommand "conftest" client "validate configuration XML files" + hadoop_add_subcommand "credential" client "interact with credential providers" + hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon" + hadoop_add_subcommand "dtutil" client "operations related to delegation tokens" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "fs" client "run a generic filesystem user client" + hadoop_add_subcommand "jar <jar>" client "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
+ hadoop_add_subcommand "jnipath" client "prints the java.library.path" + hadoop_add_subcommand "kerbname" client "show auth_to_local principal conversion" + hadoop_add_subcommand "key" client "manage keys via the KeyProvider" + hadoop_add_subcommand "registrydns" daemon "run the registry DNS server" + hadoop_add_subcommand "version" client "print the version" + hadoop_add_subcommand "kdiag" client "Diagnose Kerberos Problems" + hadoop_add_subcommand "rbfbalance" client "move directories and files across router-based federation namespaces" + hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true +} + +## @description Default command handler for hadoop command +## @audience public +## @stability stable +## @replaceable no +## @param CLI arguments +function hadoopcmd_case +{ + subcmd=$1 + shift + + case ${subcmd} in + balancer|datanode|dfs|dfsadmin|dfsgroups| \ + namenode|secondarynamenode|fsck|fetchdt|oiv| \ + portmap|nfs3) + hadoop_error "WARNING: Use of this script to execute ${subcmd} is deprecated." + subcmd=${subcmd/dfsgroups/groups} + hadoop_error "WARNING: Attempting to execute replacement \"hdfs ${subcmd}\" instead." + hadoop_error "" + #try to locate hdfs and if present, delegate to it. + if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then + exec "${HADOOP_HDFS_HOME}/bin/hdfs" \ + --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@" + elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then + exec "${HADOOP_HOME}/bin/hdfs" \ + --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@" + else + hadoop_error "HADOOP_HDFS_HOME not found!" + exit 1 + fi + ;; + + #mapred commands for backwards compatibility + pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker) + hadoop_error "WARNING: Use of this script to execute ${subcmd} is deprecated." + subcmd=${subcmd/mrgroups/groups} + hadoop_error "WARNING: Attempting to execute replacement \"mapred ${subcmd}\" instead." + hadoop_error "" + #try to locate mapred and if present, delegate to it. + if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then + exec "${HADOOP_MAPRED_HOME}/bin/mapred" \ + --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@" + elif [[ -f "${HADOOP_HOME}/bin/mapred" ]]; then + exec "${HADOOP_HOME}/bin/mapred" \ + --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@" + else + hadoop_error "HADOOP_MAPRED_HOME not found!" 
+ exit 1 + fi + ;; + checknative) + HADOOP_CLASSNAME=org.apache.hadoop.util.NativeLibraryChecker + ;; + classpath) + hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@" + ;; + conftest) + HADOOP_CLASSNAME=org.apache.hadoop.util.ConfTest + ;; + credential) + HADOOP_CLASSNAME=org.apache.hadoop.security.alias.CredentialShell + ;; + daemonlog) + HADOOP_CLASSNAME=org.apache.hadoop.log.LogLevel + ;; + dtutil) + HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell + ;; + envvars) + echo "JAVA_HOME='${JAVA_HOME}'" + echo "HADOOP_COMMON_HOME='${HADOOP_COMMON_HOME}'" + echo "HADOOP_COMMON_DIR='${HADOOP_COMMON_DIR}'" + echo "HADOOP_COMMON_LIB_JARS_DIR='${HADOOP_COMMON_LIB_JARS_DIR}'" + echo "HADOOP_COMMON_LIB_NATIVE_DIR='${HADOOP_COMMON_LIB_NATIVE_DIR}'" + echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'" + echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'" + echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'" + echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" + if [[ -n "${QATESTMODE}" ]]; then + echo "MYNAME=${MYNAME}" + echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}" + fi + exit 0 + ;; + fs) + HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell + ;; + jar) + if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then + hadoop_error "WARNING: Use \"yarn jar\" to launch YARN applications." + fi + if [[ -z $1 || $1 = "--help" ]]; then + echo "Usage: hadoop jar <jar> [mainClass] args..." + exit 0 + fi + HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar + ;; + jnipath) + hadoop_finalize + echo "${JAVA_LIBRARY_PATH}" + exit 0 + ;; + kerbname) + HADOOP_CLASSNAME=org.apache.hadoop.security.HadoopKerberosName + ;; + kdiag) + HADOOP_CLASSNAME=org.apache.hadoop.security.KDiag + ;; + key) + HADOOP_CLASSNAME=org.apache.hadoop.crypto.key.KeyShell + ;; + registrydns) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_SECURE_CLASSNAME='org.apache.hadoop.registry.server.dns.PrivilegedRegistryDNSStarter' + HADOOP_CLASSNAME='org.apache.hadoop.registry.server.dns.RegistryDNSServer' + ;; + version) + HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo + ;; + rbfbalance) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.rbfbalance.RouterFedBalance + hadoop_add_to_classpath_tools hadoop-federation-balance + hadoop_add_to_classpath_tools hadoop-distcp + ;; + *) + HADOOP_CLASSNAME="${subcmd}" + if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then + hadoop_exit_with_usage 1 + fi + ;; + esac +} + +# This script runs the hadoop core commands. + +# let's locate libexec... +if [[ -n "${HADOOP_HOME}" ]]; then + HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" +else + bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P) + HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then + # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh + . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 1>&2 + exit 1 +fi + +# now that we have support code, let's abs MYNAME so we can use it later +MYNAME=$(hadoop_abs "${MYNAME}") + +if [[ $# = 0 ]]; then + hadoop_exit_with_usage 1 +fi + +HADOOP_SUBCMD=$1 +shift + +if hadoop_need_reexec hadoop "${HADOOP_SUBCMD}"; then + hadoop_uservar_su hadoop "${HADOOP_SUBCMD}" \ + "${MYNAME}" \ + "--reexec" \ + "${HADOOP_USER_PARAMS[@]}" + exit $?
+fi + +hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" + +HADOOP_SUBCMD_ARGS=("$@") + +if declare -f hadoop_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then + hadoop_debug "Calling dynamically: hadoop_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}" + "hadoop_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" +else + hadoopcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" +fi + +hadoop_add_client_opts + +if [[ ${HADOOP_WORKER_MODE} = true ]]; then + hadoop_common_worker_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}" + exit $? +fi + +hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" + +# everything is in globals at this point, so call the generic handler +hadoop_generic_java_subcmd_handler diff --git a/hadoop-3.4.0/bin/hadoop.cmd b/hadoop-3.4.0/bin/hadoop.cmd new file mode 100644 index 0000000..ed25183 --- /dev/null +++ b/hadoop-3.4.0/bin/hadoop.cmd @@ -0,0 +1,323 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + + +@rem This script runs the hadoop core commands. + +@rem Environment Variables +@rem +@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME. +@rem +@rem HADOOP_CLASSPATH Extra Java CLASSPATH entries. +@rem +@rem HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is +@rem added in the beginning of the global +@rem classpath. Can be defined, for example, +@rem by doing +@rem export HADOOP_USER_CLASSPATH_FIRST=true +@rem +@rem HADOOP_USE_CLIENT_CLASSLOADER When defined, HADOOP_CLASSPATH and the +@rem jar as the hadoop jar argument are +@rem handled by a separate isolated client +@rem classloader. If it is set, +@rem HADOOP_USER_CLASSPATH_FIRST is +@rem ignored. Can be defined by doing +@rem export HADOOP_USE_CLIENT_CLASSLOADER=true +@rem +@rem HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES +@rem When defined, it overrides the default +@rem definition of system classes for the +@rem client classloader when +@rem HADOOP_USE_CLIENT_CLASSLOADER is +@rem enabled. Names ending in '.' (period) +@rem are treated as package names, and names +@rem starting with a '-' are treated as +@rem negative matches. For example, +@rem export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop." + +@rem +@rem HADOOP_HEAPSIZE The maximum amount of heap to use, in MB. +@rem Default is 1000. +@rem +@rem HADOOP_OPTS Extra Java runtime options. +@rem +@rem HADOOP_CLIENT_OPTS when the respective command is run. +@rem HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker +@rem for e.g. HADOOP_CLIENT_OPTS applies to +@rem more than one command (fs, dfs, fsck, +@rem dfsadmin etc) +@rem +@rem HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. 
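+@rem                            For a binary distribution this is typically %HADOOP_HOME%\etc\hadoop.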
+@rem +@rem HADOOP_ROOT_LOGGER The root appender. Default is INFO,console +@rem + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +call :updatepath %HADOOP_BIN_PATH% + +:main + setlocal enabledelayedexpansion + + set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec + if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% + ) + + call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %* + if "%1" == "--config" ( + shift + shift + ) + if "%1" == "--loglevel" ( + shift + shift + ) + + set hadoop-command=%1 + if not defined hadoop-command ( + goto print_usage + ) + + call :make_command_arguments %* + + set hdfscommands=namenode secondarynamenode datanode dfs dfsadmin fsck balancer fetchdt oiv dfsgroups + for %%i in ( %hdfscommands% ) do ( + if %hadoop-command% == %%i set hdfscommand=true + ) + if defined hdfscommand ( + @echo DEPRECATED: Use of this script to execute hdfs command is deprecated. 1>&2 + @echo Instead use the hdfs command for it. 1>&2 + if exist %HADOOP_HDFS_HOME%\bin\hdfs.cmd ( + call %HADOOP_HDFS_HOME%\bin\hdfs.cmd %* + goto :eof + ) else if exist %HADOOP_HOME%\bin\hdfs.cmd ( + call %HADOOP_HOME%\bin\hdfs.cmd %* + goto :eof + ) else ( + echo HADOOP_HDFS_HOME not found! + goto :eof + ) + ) + + set mapredcommands=pipes job queue mrgroups mradmin jobtracker tasktracker + for %%i in ( %mapredcommands% ) do ( + if %hadoop-command% == %%i set mapredcommand=true + ) + if defined mapredcommand ( + @echo DEPRECATED: Use of this script to execute mapred command is deprecated. 1>&2 + @echo Instead use the mapred command for it. 1>&2 + if exist %HADOOP_MAPRED_HOME%\bin\mapred.cmd ( + call %HADOOP_MAPRED_HOME%\bin\mapred.cmd %* + goto :eof + ) else if exist %HADOOP_HOME%\bin\mapred.cmd ( + call %HADOOP_HOME%\bin\mapred.cmd %* + goto :eof + ) else ( + echo HADOOP_MAPRED_HOME not found! + goto :eof + ) + ) + + if %hadoop-command% == classpath ( + if not defined hadoop-command-arguments ( + @rem No need to bother starting up a JVM for this simple case. + @echo %CLASSPATH% + exit /b + ) + ) else if %hadoop-command% == jnipath ( + echo !PATH! + exit /b + ) + + set corecommands=fs version jar checknative conftest distch distcp daemonlog archive classpath credential kerbname key kdiag + for %%i in ( %corecommands% ) do ( + if %hadoop-command% == %%i set corecommand=true + ) + if defined corecommand ( + call :%hadoop-command% + ) else ( + set CLASSPATH=%CLASSPATH%;%CD% + set CLASS=%hadoop-command% + ) + + set path=%PATH%;%HADOOP_BIN_PATH% + + @rem Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + + @rem make sure security appender is turned off + if not defined HADOOP_SECURITY_LOGGER ( + set HADOOP_SECURITY_LOGGER=INFO,NullAppender + ) + set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% + + call %JAVA% %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hadoop-command-arguments% + + exit /b %ERRORLEVEL% + +:fs + set CLASS=org.apache.hadoop.fs.FsShell + goto :eof + +:version + set CLASS=org.apache.hadoop.util.VersionInfo + goto :eof + +:jar + if defined YARN_OPTS ( + @echo WARNING: Use "yarn jar" to launch YARN applications. + ) else if defined YARN_CLIENT_OPTS ( + @echo WARNING: Use "yarn jar" to launch YARN applications. + ) + @rem if --help option is used, no need to call command + if "!hadoop-command-arguments!" == "--help" ( + @echo Usage: hadoop jar ^<jar^> [mainClass] args...
+ goto :eof + ) + set CLASS=org.apache.hadoop.util.RunJar + goto :eof + +:checknative + set CLASS=org.apache.hadoop.util.NativeLibraryChecker + goto :eof + +:conftest + set CLASS=org.apache.hadoop.util.ConfTest + goto :eof + +:distch + set CLASS=org.apache.hadoop.tools.DistCh + set CLASSPATH=%CLASSPATH%;%TOOL_PATH% + goto :eof + +:distcp + set CLASS=org.apache.hadoop.tools.DistCp + set CLASSPATH=%CLASSPATH%;%TOOL_PATH% + goto :eof + +:daemonlog + set CLASS=org.apache.hadoop.log.LogLevel + goto :eof + +:archive + set CLASS=org.apache.hadoop.tools.HadoopArchives + set CLASSPATH=%CLASSPATH%;%TOOL_PATH% + goto :eof + +:classpath + set CLASS=org.apache.hadoop.util.Classpath + goto :eof + +:credential + set CLASS=org.apache.hadoop.security.alias.CredentialShell + goto :eof + +:kerbname + set CLASS=org.apache.hadoop.security.HadoopKerberosName + goto :eof + +:kdiag + set CLASS=org.apache.hadoop.security.KDiag + goto :eof + +:key + set CLASS=org.apache.hadoop.crypto.key.KeyShell + goto :eof + +:updatepath + set path_to_add=%* + set current_path_comparable=%path% + set current_path_comparable=%current_path_comparable: =_% + set current_path_comparable=%current_path_comparable:(=_% + set current_path_comparable=%current_path_comparable:)=_% + set path_to_add_comparable=%path_to_add% + set path_to_add_comparable=%path_to_add_comparable: =_% + set path_to_add_comparable=%path_to_add_comparable:(=_% + set path_to_add_comparable=%path_to_add_comparable:)=_% + + for %%i in ( %current_path_comparable% ) do ( + if /i "%%i" == "%path_to_add_comparable%" ( + set path_to_add_exist=true + ) + ) + set system_path_comparable= + set path_to_add_comparable= + if not defined path_to_add_exist path=%path_to_add%;%path% + set path_to_add= + goto :eof + +@rem This changes %1, %2 etc. Hence those cannot be used after calling this. +:make_command_arguments + if "%1" == "--config" ( + shift + shift + ) + if "%1" == "--loglevel" ( + shift + shift + ) + if [%2] == [] goto :eof + shift + set _arguments= + :MakeCmdArgsLoop + if [%1]==[] goto :EndLoop + + if not defined _arguments ( + set _arguments=%1 + ) else ( + set _arguments=!_arguments! %1 + ) + shift + goto :MakeCmdArgsLoop + :EndLoop + set hadoop-command-arguments=%_arguments% + goto :eof + +:print_usage + @echo Usage: hadoop [--config confdir] [--loglevel loglevel] COMMAND + @echo where COMMAND is one of: + @echo fs run a generic filesystem user client + @echo version print the version + @echo jar ^<jar^> run a jar file + @echo note: please use "yarn jar" to launch + @echo YARN applications, not this command. + @echo checknative [-a^|-h] check native hadoop and compression libraries availability + @echo conftest validate configuration XML files + @echo distch path:owner:group:permission + @echo distributed metadata changer + @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively + @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive + @echo classpath prints the class path needed to get the + @echo Hadoop jar and the required libraries + @echo credential interact with credential providers + @echo jnipath prints the java.library.path + @echo kerbname show auth_to_local principal conversion + @echo kdiag diagnose kerberos problems + @echo key manage keys via the KeyProvider + @echo daemonlog get/set the log level for each daemon + @echo or + @echo CLASSNAME run the class named CLASSNAME + @echo. + @echo Most commands print help when invoked w/o parameters.
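+@rem For instance, running "hadoop fs" with no arguments prints the FsShell help text.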
+ +endlocal diff --git a/hadoop-3.4.0/bin/hadoop.dll b/hadoop-3.4.0/bin/hadoop.dll new file mode 100644 index 0000000..5d13a62 Binary files /dev/null and b/hadoop-3.4.0/bin/hadoop.dll differ diff --git a/hadoop-3.4.0/bin/hadoop.exp b/hadoop-3.4.0/bin/hadoop.exp new file mode 100644 index 0000000..8fbae74 Binary files /dev/null and b/hadoop-3.4.0/bin/hadoop.exp differ diff --git a/hadoop-3.4.0/bin/hadoop.lib b/hadoop-3.4.0/bin/hadoop.lib new file mode 100644 index 0000000..f5dc1c6 Binary files /dev/null and b/hadoop-3.4.0/bin/hadoop.lib differ diff --git a/hadoop-3.4.0/bin/hadoop.pdb b/hadoop-3.4.0/bin/hadoop.pdb new file mode 100644 index 0000000..97c1c19 Binary files /dev/null and b/hadoop-3.4.0/bin/hadoop.pdb differ diff --git a/hadoop-3.4.0/bin/hdfs b/hadoop-3.4.0/bin/hdfs new file mode 100644 index 0000000..7d3a047 --- /dev/null +++ b/hadoop-3.4.0/bin/hdfs @@ -0,0 +1,291 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The name of the script being executed. +HADOOP_SHELL_EXECNAME="hdfs" +MYNAME="${BASH_SOURCE-$0}" + +## @description build up the hdfs command's usage text. 
+## @audience public +## @stability stable +## @replaceable no +function hadoop_usage +{ + hadoop_add_option "--buildpaths" "attempt to add class files from build tree" + hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon" + hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode" + hadoop_add_option "--loglevel level" "set the log4j level for this command" + hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" + hadoop_add_option "--workers" "turn on worker mode" + + hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility" + hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache" + hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" + hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones" + hadoop_add_subcommand "datanode" daemon "run a DFS datanode" + hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands" + hadoop_add_subcommand "dfs" client "run a filesystem command on the file system" + hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client" + hadoop_add_subcommand "dfsrouter" daemon "run the DFS router" + hadoop_add_subcommand "dfsrouteradmin" admin "manage Router-based federation" + hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI" + hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode" + hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility" + hadoop_add_subcommand "fsImageValidation" admin "run FsImageValidation to check an fsimage" + hadoop_add_subcommand "getconf" client "get config values from configuration" + hadoop_add_subcommand "groups" client "get the groups which users belong to" + hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client" + hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." 
+ hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode" + hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user" + hadoop_add_subcommand "lsSnapshot" client "list all snapshots for a snapshottable directory" + hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types" + hadoop_add_subcommand "namenode" daemon "run the DFS namenode" + hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway" + hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file" + hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage" + hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage" + hadoop_add_subcommand "portmap" daemon "run a portmap service" + hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode" + hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot" + hadoop_add_subcommand "storagepolicies" admin "list/get/set/satisfyStoragePolicy block storage policies" + hadoop_add_subcommand "sps" daemon "run external storagepolicysatisfier" + hadoop_add_subcommand "version" client "print the version" + hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon" + hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false +} + +## @description Default command handler for hadoop command +## @audience public +## @stability stable +## @replaceable no +## @param CLI arguments +function hdfscmd_case +{ + subcmd=$1 + shift + + case ${subcmd} in + balancer) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer + ;; + cacheadmin) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin + ;; + classpath) + hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@" + ;; + crypto) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CryptoAdmin + ;; + datanode) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode' + hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR + hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR + ;; + debug) + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DebugAdmin' + ;; + dfs) + HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell + ;; + dfsadmin) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin + ;; + dfsrouter) + hadoop_add_to_classpath_tools hadoop-federation-balance + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.DFSRouter' + ;; + dfsrouteradmin) + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin' + ;; + diskbalancer) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI + ;; + envvars) + echo "JAVA_HOME='${JAVA_HOME}'" + echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'" + echo "HDFS_DIR='${HDFS_DIR}'" + echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'" + echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'" + echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'" + echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'" + echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" + if [[ -n "${QATESTMODE}" ]]; then + echo "MYNAME=${MYNAME}" + echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}" + fi + exit 0 + ;; + ec) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.ECAdmin + 
;; + fetchdt) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher + ;; + fsck) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck + ;; + fsImageValidation) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.namenode.FsImageValidation + ;; + getconf) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetConf + ;; + groups) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetGroups + ;; + haadmin) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin + ;; + journalnode) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode' + ;; + jmxget) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet + ;; + lsSnapshottableDir) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir + ;; + lsSnapshot) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshot + ;; + mover) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover + ;; + namenode) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode' + hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}" + ;; + nfs3) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_SECURE_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3 + hadoop_deprecate_envvar HADOOP_SECURE_NFS3_LOG_DIR HADOOP_SECURE_LOG_DIR + hadoop_deprecate_envvar HADOOP_SECURE_NFS3_PID_DIR HADOOP_SECURE_PID_DIR + ;; + oev) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer + ;; + oiv) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB + ;; + oiv_legacy) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer + ;; + portmap) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap + ;; + secondarynamenode) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' + hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}" + ;; + snapshotDiff) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff + ;; + storagepolicies) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin + ;; + sps) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.sps.ExternalStoragePolicySatisfier + ;; + version) + HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo + ;; + zkfc) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController' + ;; + *) + HADOOP_CLASSNAME="${subcmd}" + if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then + hadoop_exit_with_usage 1 + fi + ;; + esac +} + +# let's locate libexec... +if [[ -n "${HADOOP_HOME}" ]]; then + HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" +else + bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P) + HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + # shellcheck source=./hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 
2>&1 + exit 1 +fi + +# now that we have support code, let's abs MYNAME so we can use it later +MYNAME=$(hadoop_abs "${MYNAME}") + +if [[ $# = 0 ]]; then + hadoop_exit_with_usage 1 +fi + +HADOOP_SUBCMD=$1 +shift + +if hadoop_need_reexec hdfs "${HADOOP_SUBCMD}"; then + hadoop_uservar_su hdfs "${HADOOP_SUBCMD}" \ + "${MYNAME}" \ + "--reexec" \ + "${HADOOP_USER_PARAMS[@]}" + exit $? +fi + +hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" + +HADOOP_SUBCMD_ARGS=("$@") + +if declare -f hdfs_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then + hadoop_debug "Calling dynamically: hdfs_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}" + "hdfs_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" +else + hdfscmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" +fi + +hadoop_add_client_opts + +if [[ ${HADOOP_WORKER_MODE} = true ]]; then + hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}" + exit $? +fi + +hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" + +# everything is in globals at this point, so call the generic handler +hadoop_generic_java_subcmd_handler diff --git a/hadoop-3.4.0/bin/hdfs.cmd b/hadoop-3.4.0/bin/hdfs.cmd new file mode 100644 index 0000000..21d4de7 --- /dev/null +++ b/hadoop-3.4.0/bin/hdfs.cmd @@ -0,0 +1,270 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem +setlocal enabledelayedexpansion + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %* +if "%1" == "--config" ( + shift + shift +) +if "%1" == "--loglevel" ( + shift + shift +) + +:main + if exist %HADOOP_CONF_DIR%\hadoop-env.cmd ( + call %HADOOP_CONF_DIR%\hadoop-env.cmd + ) + + set hdfs-command=%1 + call :make_command_arguments %* + + if not defined hdfs-command ( + goto print_usage + ) + + if %hdfs-command% == classpath ( + if not defined hdfs-command-arguments ( + @rem No need to bother starting up a JVM for this simple case. 
+ @echo %CLASSPATH% + exit /b + ) + ) + + set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck fsImageValidation balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir lsSnapshot cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug + for %%i in ( %hdfscommands% ) do ( + if %hdfs-command% == %%i set hdfscommand=true + ) + if defined hdfscommand ( + call :%hdfs-command% + ) else ( + set CLASSPATH=%CLASSPATH%;%CD% + set CLASS=%hdfs-command% + ) + + set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments% + call %JAVA% %java_arguments% + +goto :eof + +:namenode + set CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS% + goto :eof + +:journalnode + set CLASS=org.apache.hadoop.hdfs.qjournal.server.JournalNode + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_JOURNALNODE_OPTS% + goto :eof + +:zkfc + set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS% + goto :eof + +:secondarynamenode + set CLASS=org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_SECONDARYNAMENODE_OPTS% + goto :eof + +:datanode + set CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode + set HADOOP_OPTS=%HADOOP_OPTS% -server %HADOOP_DATANODE_OPTS% + goto :eof + +:dfs + set CLASS=org.apache.hadoop.fs.FsShell + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:dfsadmin + set CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:haadmin + set CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin + set CLASSPATH=%CLASSPATH%;%TOOL_PATH% + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:fsck + set CLASS=org.apache.hadoop.hdfs.tools.DFSck + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:fsImageValidation + set CLASS=org.apache.hadoop.hdfs.server.namenode.FsImageValidation + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:balancer + set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS% + goto :eof + +:jmxget + set CLASS=org.apache.hadoop.hdfs.tools.JMXGet + goto :eof + +:classpath + set CLASS=org.apache.hadoop.util.Classpath + goto :eof + +:oiv + set CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB + goto :eof + +:oev + set CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer + goto :eof + +:fetchdt + set CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher + goto :eof + +:getconf + set CLASS=org.apache.hadoop.hdfs.tools.GetConf + goto :eof + +:groups + set CLASS=org.apache.hadoop.hdfs.tools.GetGroups + goto :eof + +:snapshotDiff + set CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff + goto :eof + +:lsSnapshottableDir + set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir + goto :eof + +:lsSnapshot + set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshot + goto :eof + +:cacheadmin + set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin + goto :eof + +:mover + set CLASS=org.apache.hadoop.hdfs.server.mover.Mover + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS% + goto :eof + +:storagepolicies + set CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin + goto :eof + +:crypto + set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin + goto :eof + +:dfsrouter + set 
CLASS=org.apache.hadoop.hdfs.server.federation.router.DFSRouter + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS% + goto :eof + +:dfsrouteradmin + set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS% + goto :eof + +:debug + set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin + goto :eof + + +@rem This changes %1, %2 etc. Hence those cannot be used after calling this. +:make_command_arguments + if "%1" == "--config" ( + shift + shift + ) + if "%1" == "--loglevel" ( + shift + shift + ) + if [%2] == [] goto :eof + shift + set _hdfsarguments= + :MakeCmdArgsLoop + if [%1]==[] goto :EndLoop + + if not defined _hdfsarguments ( + set _hdfsarguments=%1 + ) else ( + set _hdfsarguments=!_hdfsarguments! %1 + ) + shift + goto :MakeCmdArgsLoop + :EndLoop + set hdfs-command-arguments=%_hdfsarguments% + goto :eof + +:print_usage + @echo Usage: hdfs [--config confdir] [--loglevel loglevel] COMMAND + @echo where COMMAND is one of: + @echo dfs run a filesystem command on the file systems supported in Hadoop. + @echo namenode -format format the DFS filesystem + @echo secondarynamenode run the DFS secondary namenode + @echo namenode run the DFS namenode + @echo journalnode run the DFS journalnode + @echo dfsrouter run the DFS router + @echo dfsrouteradmin manage Router-based federation + @echo zkfc run the ZK Failover Controller daemon + @echo datanode run a DFS datanode + @echo dfsadmin run a DFS admin client + @echo haadmin run a DFS HA admin client + @echo fsck run a DFS filesystem checking utility + @echo fsImageValidation run FsImageValidation to check an fsimage + @echo balancer run a cluster balancing utility + @echo jmxget get JMX exported values from NameNode or DataNode. + @echo oiv apply the offline fsimage viewer to an fsimage + @echo oev apply the offline edits viewer to an edits file + @echo fetchdt fetch a delegation token from the NameNode + @echo getconf get config values from configuration + @echo groups get the groups which users belong to + @echo snapshotDiff diff two snapshots of a directory or diff the + @echo current directory contents with a snapshot + @echo lsSnapshottableDir list all snapshottable dirs owned by the current user + @echo Use -help to see options + @echo lsSnapshot list all snapshots for a snapshottable dir + @echo Use -help to see options + @echo cacheadmin configure the HDFS cache + @echo crypto configure HDFS encryption zones + @echo mover run a utility to move block replicas across storage types + @echo storagepolicies list/get/set block storage policies + @echo. + @echo Most commands print help when invoked w/o parameters. + +@rem There are also debug commands, but they don't show up in this listing. 
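+@rem e.g. recoverLease, verifyMeta and computeMeta; run "hdfs debug" to list them.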
+endlocal diff --git a/hadoop-3.4.0/bin/hdfs.dll b/hadoop-3.4.0/bin/hdfs.dll new file mode 100644 index 0000000..d925fe5 Binary files /dev/null and b/hadoop-3.4.0/bin/hdfs.dll differ diff --git a/hadoop-3.4.0/bin/hdfs.exp b/hadoop-3.4.0/bin/hdfs.exp new file mode 100644 index 0000000..92cf623 Binary files /dev/null and b/hadoop-3.4.0/bin/hdfs.exp differ diff --git a/hadoop-3.4.0/bin/hdfs.lib b/hadoop-3.4.0/bin/hdfs.lib new file mode 100644 index 0000000..f705cc8 Binary files /dev/null and b/hadoop-3.4.0/bin/hdfs.lib differ diff --git a/hadoop-3.4.0/bin/hdfs.pdb b/hadoop-3.4.0/bin/hdfs.pdb new file mode 100644 index 0000000..ddc27b8 Binary files /dev/null and b/hadoop-3.4.0/bin/hdfs.pdb differ diff --git a/hadoop-3.4.0/bin/libwinutils.lib b/hadoop-3.4.0/bin/libwinutils.lib new file mode 100644 index 0000000..baad94c Binary files /dev/null and b/hadoop-3.4.0/bin/libwinutils.lib differ diff --git a/hadoop-3.4.0/bin/libwinutils.pdb b/hadoop-3.4.0/bin/libwinutils.pdb new file mode 100644 index 0000000..0b1b335 Binary files /dev/null and b/hadoop-3.4.0/bin/libwinutils.pdb differ diff --git a/hadoop-3.4.0/bin/mapred b/hadoop-3.4.0/bin/mapred new file mode 100644 index 0000000..3e52556 --- /dev/null +++ b/hadoop-3.4.0/bin/mapred @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The name of the script being executed. +HADOOP_SHELL_EXECNAME="mapred" +MYNAME="${BASH_SOURCE-$0}" + + +## @description build up the mapred command's usage text. +## @audience public +## @stability stable +## @replaceable no +function hadoop_usage +{ + hadoop_add_subcommand "classpath" client "prints the class path needed for running mapreduce subcommands" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "historyserver" daemon "run job history servers as a standalone daemon" + hadoop_add_subcommand "hsadmin" admin "job history server admin interface" + hadoop_add_subcommand "job" client "manipulate MapReduce jobs" + hadoop_add_subcommand "pipes" client "run a Pipes job" + hadoop_add_subcommand "queue" client "get information regarding JobQueues" + hadoop_add_subcommand "sampler" client "sampler" + hadoop_add_subcommand "frameworkuploader" admin "mapreduce framework upload" + hadoop_add_subcommand "version" client "print the version" + hadoop_add_subcommand "minicluster" client "CLI MiniCluster" + hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true +} + +## @description Default command handler for hadoop command +## @audience public +## @stability stable +## @replaceable no +## @param CLI arguments +function mapredcmd_case +{ + subcmd=$1 + shift + + case ${subcmd} in + mradmin|jobtracker|tasktracker|groups) + hadoop_error "Sorry, the ${subcmd} command is no longer supported." 
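+      # mradmin, jobtracker and tasktracker were MRv1 tools; YARN provides the replacements in MRv2.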
+ hadoop_error "You may find similar functionality with the \"yarn\" shell command." + hadoop_exit_with_usage 1 + ;; + classpath) + hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@" + ;; + envvars) + echo "JAVA_HOME='${JAVA_HOME}'" + echo "HADOOP_MAPRED_HOME='${HADOOP_MAPRED_HOME}'" + echo "MAPRED_DIR='${MAPRED_DIR}'" + echo "MAPRED_LIB_JARS_DIR='${MAPRED_LIB_JARS_DIR}'" + echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'" + echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'" + echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'" + echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" + exit 0 + ;; + historyserver) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer + if [[ -n "${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" ]]; then + HADOOP_HEAPSIZE_MAX="${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" + fi + HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_JHS_LOGGER:-$HADOOP_DAEMON_ROOT_LOGGER} + if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then + hadoop_add_param HADOOP_OPTS mapred.jobsummary.logger "-Dmapred.jobsummary.logger=${HADOOP_DAEMON_ROOT_LOGGER}" + fi + ;; + hsadmin) + HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin + ;; + job) + HADOOP_CLASSNAME=org.apache.hadoop.mapred.JobClient + ;; + pipes) + HADOOP_CLASSNAME=org.apache.hadoop.mapred.pipes.Submitter + ;; + queue) + HADOOP_CLASSNAME=org.apache.hadoop.mapred.JobQueueClient + ;; + sampler) + HADOOP_CLASSNAME=org.apache.hadoop.mapred.lib.InputSampler + ;; + frameworkuploader) + HADOOP_CLASSNAME=org.apache.hadoop.mapred.uploader.FrameworkUploader + ;; + version) + HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo + ;; + minicluster) + hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/timelineservice"'/*' + hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/test"'/*' + junitjar=$(echo "${HADOOP_TOOLS_LIB_JARS_DIR}"/junit-[0-9]*.jar) + hadoop_add_classpath "${junitjar}" + HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.MiniHadoopClusterManager + ;; + *) + HADOOP_CLASSNAME="${subcmd}" + if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then + hadoop_exit_with_usage 1 + fi + ;; + esac +} + +bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P) + +# let's locate libexec... +if [[ -n "${HADOOP_HOME}" ]]; then + HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" +else + HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/mapred-config.sh" ]]; then + # shellcheck source=./hadoop-mapreduce-project/bin/mapred-config.sh + . "${HADOOP_LIBEXEC_DIR}/mapred-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/mapred-config.sh." 2>&1 + exit 1 +fi + +# now that we have support code, let's abs MYNAME so we can use it later +MYNAME=$(hadoop_abs "${MYNAME}") + +if [ $# = 0 ]; then + hadoop_exit_with_usage 1 +fi + +HADOOP_SUBCMD=$1 +shift + +if hadoop_need_reexec mapred "${HADOOP_SUBCMD}"; then + hadoop_uservar_su mapred "${HADOOP_SUBCMD}" \ + "${MYNAME}" \ + "--reexec" \ + "${HADOOP_USER_PARAMS[@]}" + exit $? 
+fi + +hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" + +HADOOP_SUBCMD_ARGS=("$@") + +if declare -f mapred_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then + hadoop_debug "Calling dynamically: mapred_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}" + "mapred_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" +else + mapredcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" +fi + +hadoop_add_client_opts + +if [[ ${HADOOP_WORKER_MODE} = true ]]; then + hadoop_common_worker_mode_execute "${HADOOP_MAPRED_HOME}/bin/mapred" "${HADOOP_USER_PARAMS[@]}" + exit $? +fi + +hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" + +# everything is in globals at this point, so call the generic handler +hadoop_generic_java_subcmd_handler diff --git a/hadoop-3.4.0/bin/mapred.cmd b/hadoop-3.4.0/bin/mapred.cmd new file mode 100644 index 0000000..4085599 --- /dev/null +++ b/hadoop-3.4.0/bin/mapred.cmd @@ -0,0 +1,217 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + +@rem The Hadoop mapred command script + +setlocal enabledelayedexpansion + +if not defined HADOOP_BIN_PATH ( + set HADOOP_BIN_PATH=%~dp0 +) + +if "%HADOOP_BIN_PATH:~-1%" == "\" ( + set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1% +) + +set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec +if not defined HADOOP_LIBEXEC_DIR ( + set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR% +) + +call %HADOOP_LIBEXEC_DIR%\mapred-config.cmd %* +if "%1" == "--config" ( + shift + shift +) + +if "%1" == "--loglevel" ( + shift + shift +) + +:main + if exist %MAPRED_CONF_DIR%\mapred-env.cmd ( + call %MAPRED_CONF_DIR%\mapred-env.cmd + ) + set mapred-command=%1 + call :make_command_arguments %* + + if not defined mapred-command ( + goto print_usage + ) + + @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd + + if defined MAPRED_HEAPSIZE ( + @rem echo run with Java heapsize %MAPRED_HEAPSIZE% + set JAVA_HEAP_MAX=-Xmx%MAPRED_HEAPSIZE%m + ) + + @rem CLASSPATH initially contains HADOOP_CONF_DIR and MAPRED_CONF_DIR + if not defined HADOOP_CONF_DIR ( + echo NO HADOOP_CONF_DIR set. + echo Please specify it either in mapred-env.cmd or in the environment.
+ goto :eof + ) + + set CLASSPATH=%HADOOP_CONF_DIR%;%MAPRED_CONF_DIR%;%CLASSPATH% + + @rem for developers, add Hadoop classes to CLASSPATH + if exist %HADOOP_MAPRED_HOME%\build\classes ( + set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\classes + ) + + if exist %HADOOP_MAPRED_HOME%\build\webapps ( + set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build + ) + + if exist %HADOOP_MAPRED_HOME%\build\test\classes ( + set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\test\classes + ) + + if exist %HADOOP_MAPRED_HOME%\build\tools ( + set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\build\tools + ) + + @rem Need YARN jars also + set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\* + + @rem add libs to CLASSPATH + set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\* + + @rem add modules to CLASSPATH + set CLASSPATH=%CLASSPATH%;%HADOOP_MAPRED_HOME%\modules\* + + if %mapred-command% == classpath ( + if not defined mapred-command-arguments ( + @rem No need to bother starting up a JVM for this simple case. + @echo %CLASSPATH% + exit /b + ) + ) + + call :%mapred-command% %mapred-command-arguments% + set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %mapred-command-arguments% + call %JAVA% %java_arguments% + +goto :eof + + +:classpath + set CLASS=org.apache.hadoop.util.Classpath + goto :eof + +:job + set CLASS=org.apache.hadoop.mapred.JobClient + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:queue + set CLASS=org.apache.hadoop.mapred.JobQueueClient + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:sampler + set CLASS=org.apache.hadoop.mapred.lib.InputSampler + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:historyserver + set CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer + set HADOOP_OPTS=%HADOOP_OPTS% -Dmapred.jobsummary.logger=%HADOOP_JHS_LOGGER% %HADOOP_JOB_HISTORYSERVER_OPTS% + if defined HADOOP_JOB_HISTORYSERVER_HEAPSIZE ( + set JAVA_HEAP_MAX=-Xmx%HADOOP_JOB_HISTORYSERVER_HEAPSIZE%m + ) + goto :eof + +:distcp + set CLASS=org.apache.hadoop.tools.DistCp + set CLASSPATH=%CLASSPATH%;%TOOL_PATH% + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:archive + set CLASS=org.apache.hadoop.tools.HadoopArchives + set CLASSPATH=%CLASSPATH%;%TOOL_PATH% + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:hsadmin + set CLASS=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% + goto :eof + +:pipes + goto not_supported + +:mradmin + goto not_supported + +:jobtracker + goto not_supported + +:tasktracker + goto not_supported + +:groups + goto not_supported + + +@rem This changes %1, %2 etc. Hence those cannot be used after calling this. +:make_command_arguments + if [%2] == [] goto :eof + if "%1" == "--config" ( + shift + shift + ) + if "%1" == "--loglevel" ( + shift + shift + ) + shift + set _mapredarguments= + :MakeCmdArgsLoop + if [%1]==[] goto :EndLoop + + if not defined _mapredarguments ( + set _mapredarguments=%1 + ) else ( + set _mapredarguments=!_mapredarguments! %1 + ) + shift + goto :MakeCmdArgsLoop + :EndLoop + set mapred-command-arguments=%_mapredarguments% + goto :eof + +:not_supported + @echo Sorry, the %mapred-command% command is no longer supported. + @echo You may find similar functionality with the "yarn" shell command.
+ goto print_usage + +:print_usage + @echo Usage: mapred [--config confdir] [--loglevel loglevel] COMMAND + @echo where COMMAND is one of: + @echo job manipulate MapReduce jobs + @echo queue get information regarding JobQueues + @echo classpath prints the class path needed for running + @echo mapreduce subcommands + @echo historyserver run job history servers as a standalone daemon + @echo distcp ^<srcurl^> ^<desturl^> copy file or directories recursively + @echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive + @echo hsadmin job history server admin interface + @echo. + @echo Most commands print help when invoked w/o parameters. + +endlocal diff --git a/hadoop-3.4.0/bin/winutils.exe b/hadoop-3.4.0/bin/winutils.exe new file mode 100644 index 0000000..c23b2a0 Binary files /dev/null and b/hadoop-3.4.0/bin/winutils.exe differ diff --git a/hadoop-3.4.0/bin/winutils.pdb b/hadoop-3.4.0/bin/winutils.pdb new file mode 100644 index 0000000..675f731 Binary files /dev/null and b/hadoop-3.4.0/bin/winutils.pdb differ diff --git a/hadoop-3.4.0/bin/yarn b/hadoop-3.4.0/bin/yarn new file mode 100644 index 0000000..6ef4e2c --- /dev/null +++ b/hadoop-3.4.0/bin/yarn @@ -0,0 +1,334 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The name of the script being executed. +HADOOP_SHELL_EXECNAME="yarn" +MYNAME="${BASH_SOURCE-$0}" + +## @description build up the yarn command's usage text.
+## @audience public +## @stability stable +## @replaceable no +function hadoop_usage +{ + hadoop_add_option "--buildpaths" "attempt to add class files from build tree" + hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon" + hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode" + hadoop_add_option "--loglevel level" "set the log4j level for this command" + hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" + hadoop_add_option "--workers" "turn on worker mode" + + hadoop_add_subcommand "app|application" client "prints application(s) report/kill application/manage long running application" + hadoop_add_subcommand "applicationattempt" client "prints applicationattempt(s) report" + hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" + hadoop_add_subcommand "cluster" client "prints cluster information" + hadoop_add_subcommand "container" client "prints container(s) report" + hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy Generator" + hadoop_add_subcommand "jar <jar>" client "run a jar file" + hadoop_add_subcommand "logs" client "dump container logs" + hadoop_add_subcommand "node" admin "prints node report(s)" + hadoop_add_subcommand "nodemanager" daemon "run a nodemanager on each worker" + hadoop_add_subcommand "proxyserver" daemon "run the web app proxy server" + hadoop_add_subcommand "queue" client "prints queue information" + hadoop_add_subcommand "registrydns" daemon "run the registry DNS server" + hadoop_add_subcommand "resourcemanager" daemon "run the ResourceManager" + hadoop_add_subcommand "fs2cs" client "converts Fair Scheduler configuration to Capacity Scheduler (EXPERIMENTAL)" + hadoop_add_subcommand "rmadmin" admin "admin tools" + hadoop_add_subcommand "router" daemon "run the Router daemon" + hadoop_add_subcommand "routeradmin" admin "router admin tools" + hadoop_add_subcommand "schedulerconf" client "Updates scheduler configuration" + hadoop_add_subcommand "scmadmin" admin "SharedCacheManager admin tools" + hadoop_add_subcommand "sharedcachemanager" daemon "run the SharedCacheManager daemon" + hadoop_add_subcommand "timelinereader" client "run the timeline reader server" + hadoop_add_subcommand "timelineserver" daemon "run the timeline server" + hadoop_add_subcommand "top" client "view cluster information" + hadoop_add_subcommand "nodeattributes" client "node attributes cli client" + hadoop_add_subcommand "version" client "print the version" + hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true +} + +## @description Default command handler for yarn command +## @audience public +## @stability stable +## @replaceable no +## @param CLI arguments +function yarncmd_case +{ + subcmd=$1 + shift + + case ${subcmd} in + app|application|applicationattempt|container) + HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ApplicationCLI + set -- "${subcmd}" "$@" + HADOOP_SUBCMD_ARGS=("$@") + local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\ +${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\ +${HADOOP_HDFS_HOME}/${HDFS_DIR},\ +${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\ +${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\ +${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}" + hadoop_translate_cygwin_path sld + hadoop_add_param HADOOP_OPTS service.libdir "-Dservice.libdir=${sld}" +
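+      # service.libdir hands the YARN services framework a comma-separated list of the local Hadoop jar directories assembled above.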
+    ;;
+    classpath)
+      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
+    ;;
+    cluster)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ClusterCLI
+    ;;
+    daemonlog)
+      HADOOP_CLASSNAME=org.apache.hadoop.log.LogLevel
+    ;;
+    envvars)
+      echo "JAVA_HOME='${JAVA_HOME}'"
+      echo "HADOOP_YARN_HOME='${HADOOP_YARN_HOME}'"
+      echo "YARN_DIR='${YARN_DIR}'"
+      echo "YARN_LIB_JARS_DIR='${YARN_LIB_JARS_DIR}'"
+      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
+      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
+      exit 0
+    ;;
+    globalpolicygenerator)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator'
+      # Backwards compatibility
+      if [[ -n "${YARN_GLOBALPOLICYGENERATOR_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_GLOBALPOLICYGENERATOR_HEAPSIZE}"
+      fi
+    ;;
+    jar)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
+    ;;
+    historyserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      echo "DEPRECATED: Use of this command to start the timeline server is deprecated." 1>&2
+      echo "Instead use the timelineserver command for it." 1>&2
+      echo "Starting the History Server anyway..." 1>&2
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+    ;;
+    logs)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.LogsCLI
+    ;;
+    node)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.NodeCLI
+    ;;
+    nodemanager)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
+      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*" before
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
+      # Backwards compatibility
+      if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
+      fi
+    ;;
+    proxyserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
+      # Backwards compatibility
+      if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}"
+      fi
+    ;;
+    queue)
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.QueueCLI
+    ;;
+    registrydns)
+      echo "DEPRECATED: Use of this command is deprecated." 1>&2
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_SECURE_CLASSNAME='org.apache.hadoop.registry.server.dns.PrivilegedRegistryDNSStarter'
+      HADOOP_CLASSNAME='org.apache.hadoop.registry.server.dns.RegistryDNSServer'
+    ;;
+    resourcemanager)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
+      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*" before
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
+      # Backwards compatibility
+      if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}"
+      fi
+      local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
+${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
+${HADOOP_HDFS_HOME}/${HDFS_DIR},\
+${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
+${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
+${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
+      hadoop_translate_cygwin_path sld
+      hadoop_add_param HADOOP_OPTS service.libdir "-Dservice.libdir=${sld}"
+    ;;
+    fs2cs)
+      HADOOP_CLASSNAME="org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter.FSConfigToCSConfigConverterMain"
+    ;;
+    rmadmin)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
+    ;;
+    router)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.router.Router'
+      # Backwards compatibility
+      if [[ -n "${YARN_ROUTER_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_ROUTER_HEAPSIZE}"
+      fi
+    ;;
+    routeradmin)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RouterCLI'
+    ;;
+    schedulerconf)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.SchedConfCLI'
+    ;;
+    scmadmin)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
+    ;;
+    sharedcachemanager)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
+    ;;
+    timelinereader)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
+      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*" before
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer'
+    ;;
+    nodeattributes)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="false"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.NodeAttributesCLI'
+    ;;
+    timelineserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+      # Backwards compatibility
+      if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
+        HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}"
+      fi
+    ;;
+    version)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
+    ;;
+    top)
+      doNotSetCols=0
+      doNotSetRows=0
+      for i in "$@"; do
+        if [[ $i == "-cols" ]]; then
+          doNotSetCols=1
+        fi
+        if [[ $i == "-rows" ]]; then
+          doNotSetRows=1
+        fi
+      done
+      if [ $doNotSetCols == 0 ] && [ -n "${TERM}" ]; then
+        cols=$(tput cols)
+        if [ -n "$cols" ]; then
+          args=("$@")
+          args=("${args[@]}" "-cols" "$cols")
+          set -- "${args[@]}"
+        fi
+      fi
+      if [ $doNotSetRows == 0 ] && [ -n "${TERM}" ]; then
+        rows=$(tput lines)
+        if [ -n "$rows" ]; then
+          args=("$@")
+          args=("${args[@]}" "-rows" "$rows")
+          set -- "${args[@]}"
+        fi
+      fi
+      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.TopCLI
+      HADOOP_SUBCMD_ARGS=("$@")
+    ;;
+    *)
+      HADOOP_CLASSNAME="${subcmd}"
+      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+  esac
+}
+
+# let's locate libexec...
+if [[ -n "${HADOOP_HOME}" ]]; then
+  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
+else
+  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
+  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/yarn-config.sh" ]]; then
+  # shellcheck source=./hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
+  . "${HADOOP_LIBEXEC_DIR}/yarn-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/yarn-config.sh." 1>&2
+  exit 1
+fi
+
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
+# if no args specified, show usage
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
+fi
+
+# get arguments
+HADOOP_SUBCMD=$1
+shift
+
+if hadoop_need_reexec yarn "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su yarn "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
+hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
+HADOOP_SUBCMD_ARGS=("$@")
+
+if declare -f yarn_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
+  hadoop_debug "Calling dynamically: yarn_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}"
+  "yarn_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+else
+  yarncmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+fi
+
+# It's unclear if YARN_CLIENT_OPTS is actually a useful
+# thing to have separate from HADOOP_CLIENT_OPTS. Someone
+# might use it, so let's not deprecate it and just override
+# HADOOP_CLIENT_OPTS instead before we (potentially) add it
+# to the command line
+if [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+  HADOOP_CLIENT_OPTS=${YARN_CLIENT_OPTS}
+fi
+
+hadoop_add_client_opts
+
+if [[ ${HADOOP_WORKER_MODE} = true ]]; then
+  hadoop_common_worker_mode_execute "${HADOOP_YARN_HOME}/bin/yarn" "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
+# everything is in globals at this point, so call the generic handler
+hadoop_generic_java_subcmd_handler
diff --git a/hadoop-3.4.0/bin/yarn.cmd b/hadoop-3.4.0/bin/yarn.cmd
new file mode 100644
index 0000000..89dc5a3
--- /dev/null
+++ b/hadoop-3.4.0/bin/yarn.cmd
@@ -0,0 +1,385 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The YARN command script
+@rem
+@rem Environment Variables
+@rem
+@rem   JAVA_HOME        The java implementation to use. Required.
+@rem
+@rem   YARN_CLASSPATH   Extra Java CLASSPATH entries.
+@rem
+@rem   YARN_HEAPSIZE    The maximum amount of heap to use, in MB.
+@rem                    Default is 1000.
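+@rem                    Example (illustrative): "set YARN_HEAPSIZE=2048"
+@rem                    starts the JVM with -Xmx2048m.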
+@rem   YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command,
+@rem                    e.g. YARN_NODEMANAGER_HEAPSIZE sets the heap
+@rem                    size for the NodeManager. If you set the
+@rem                    heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+@rem                    they take precedence.
+@rem
+@rem   YARN_OPTS        Extra Java runtime options.
+@rem
+@rem   YARN_CLIENT_OPTS applies when a client command is run.
+@rem   YARN_{COMMAND}_OPTS applies only to the named command, e.g.
+@rem                    YARN_NODEMANAGER_OPTS applies to the NodeManager.
+@rem
+@rem   YARN_CONF_DIR    Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+@rem
+@rem   YARN_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+  set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+  set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+  set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %DEFAULT_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+  shift
+  shift
+)
+if "%1" == "--loglevel" (
+  shift
+  shift
+)
+
+:main
+  if exist %YARN_CONF_DIR%\yarn-env.cmd (
+    call %YARN_CONF_DIR%\yarn-env.cmd
+  )
+
+  set yarn-command=%1
+  call :make_command_arguments %*
+
+  if not defined yarn-command (
+    goto print_usage
+  )
+
+  @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd
+
+  if defined YARN_HEAPSIZE (
+    @rem echo run with Java heapsize %YARN_HEAPSIZE%
+    set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+  )
+
+  @rem CLASSPATH initially contains HADOOP_CONF_DIR & YARN_CONF_DIR
+  if not defined HADOOP_CONF_DIR (
+    echo No HADOOP_CONF_DIR set.
+    echo Please specify it either in yarn-env.cmd or in the environment.
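+    @rem A typical value is the etc\hadoop directory under the install root,
+    @rem e.g. set HADOOP_CONF_DIR=C:\hadoop-3.4.0\etc\hadoop (illustrative path).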
+    goto :eof
+  )
+
+  set CLASSPATH=%HADOOP_CONF_DIR%;%YARN_CONF_DIR%;%CLASSPATH%
+
+  @rem for developers, add Hadoop classes to CLASSPATH
+  if exist %HADOOP_YARN_HOME%\yarn-api\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-api\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-common\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-common\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-mapreduce\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-mapreduce\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-master-worker\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-master-worker\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\build\test\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
+  )
+
+  if exist %HADOOP_YARN_HOME%\build\tools (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\tools
+  )
+
+  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
+
+  if %yarn-command% == classpath (
+    if not defined yarn-command-arguments (
+      @rem No need to bother starting up a JVM for this simple case.
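+      @rem Example: "yarn classpath" with no extra arguments just echoes the
+      @rem computed classpath; "yarn classpath --glob" still goes through
+      @rem org.apache.hadoop.util.Classpath to expand the wildcard entries.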
+      @echo %CLASSPATH%
+      exit /b
+    )
+  )
+
+  set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
+     application applicationattempt container node queue logs daemonlog historyserver ^
+     timelineserver timelinereader router globalpolicygenerator classpath
+  for %%i in ( %yarncommands% ) do (
+    if %yarn-command% == %%i set yarncommand=true
+  )
+  if defined yarncommand (
+    call :%yarn-command%
+  ) else (
+    set CLASSPATH=%CLASSPATH%;%CD%
+    set CLASS=%yarn-command%
+  )
+
+  if defined JAVA_LIBRARY_PATH (
+    set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+  )
+
+  set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments%
+  call %JAVA% %java_arguments%
+
+goto :eof
+
+:classpath
+  set CLASS=org.apache.hadoop.util.Classpath
+  goto :eof
+
+:rmadmin
+  set CLASS=org.apache.hadoop.yarn.client.cli.RMAdminCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:application
+  set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+  goto :eof
+
+:applicationattempt
+  set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+  goto :eof
+
+:cluster
+  set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:container
+  set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
+  goto :eof
+
+:node
+  set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:queue
+  set CLASS=org.apache.hadoop.yarn.client.cli.QueueCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:resourcemanager
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\rm-config\log4j.properties
+  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\timelineservice\*
+  set CLASSPATH=%HADOOP_YARN_HOME%\%YARN_DIR%\timelineservice\lib\*;%CLASSPATH%
+  set CLASS=org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
+  set YARN_OPTS=%YARN_OPTS% %YARN_RESOURCEMANAGER_OPTS%
+  if defined YARN_RESOURCEMANAGER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_RESOURCEMANAGER_HEAPSIZE%m
+  )
+  goto :eof
+
+:historyserver
+  @echo DEPRECATED: Use of this command to start the timeline server is deprecated. 1>&2
+  @echo Instead use the timelineserver command for it. 1>&2
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
+  set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
+  if defined YARN_HISTORYSERVER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
+  )
+  goto :eof
+
+:timelineserver
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
+  set YARN_OPTS=%YARN_OPTS% %HADOOP_TIMELINESERVER_OPTS%
+  if defined YARN_TIMELINESERVER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_TIMELINESERVER_HEAPSIZE%m
+  )
+  goto :eof
+
+:timelinereader
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\timelineserver-config\log4j.properties
+  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\timelineservice\*
+  set CLASSPATH=%HADOOP_YARN_HOME%\%YARN_DIR%\timelineservice\lib\*;%CLASSPATH%
+  set CLASS=org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer
+  set YARN_OPTS=%YARN_OPTS% %YARN_TIMELINEREADER_OPTS%
+  goto :eof
+
+:router
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.router.Router
+  set YARN_OPTS=%YARN_OPTS% %HADOOP_ROUTER_OPTS%
+  if defined YARN_ROUTER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_ROUTER_HEAPSIZE%m
+  )
+  goto :eof
+
+:globalpolicygenerator
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\globalpolicygenerator-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator
+  set YARN_OPTS=%YARN_OPTS% %YARN_GLOBALPOLICYGENERATOR_OPTS%
+  if defined YARN_GLOBALPOLICYGENERATOR_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_GLOBALPOLICYGENERATOR_HEAPSIZE%m
+  )
+  goto :eof
+
+:routeradmin
+  set CLASS=org.apache.hadoop.yarn.client.cli.RouterCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:nodemanager
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
+  set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\timelineservice\*
+  set CLASSPATH=%HADOOP_YARN_HOME%\%YARN_DIR%\timelineservice\lib\*;%CLASSPATH%
+  set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
+  set YARN_OPTS=%YARN_OPTS% -server %HADOOP_NODEMANAGER_OPTS%
+  if defined YARN_NODEMANAGER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_NODEMANAGER_HEAPSIZE%m
+  )
+  goto :eof
+
+:proxyserver
+  set CLASS=org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer
+  set YARN_OPTS=%YARN_OPTS% %HADOOP_PROXYSERVER_OPTS%
+  if defined YARN_PROXYSERVER_HEAPSIZE (
+    set JAVA_HEAP_MAX=-Xmx%YARN_PROXYSERVER_HEAPSIZE%m
+  )
+  goto :eof
+
+:version
+  set CLASS=org.apache.hadoop.util.VersionInfo
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:jar
+  set CLASS=org.apache.hadoop.util.RunJar
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:logs
+  set CLASS=org.apache.hadoop.yarn.client.cli.LogsCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:daemonlog
+  set CLASS=org.apache.hadoop.log.LogLevel
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+:schedulerconf
+  set CLASS=org.apache.hadoop.yarn.client.cli.SchedConfCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
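+@rem Example (illustrative): "yarn logs -applicationId app123" leaves
+@rem yarn-command=logs and yarn-command-arguments=-applicationId app123.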
+:make_command_arguments
+  if "%1" == "--config" (
+    shift
+    shift
+  )
+  if "%1" == "--loglevel" (
+    shift
+    shift
+  )
+  if [%2] == [] goto :eof
+  shift
+  set _yarnarguments=
+  :MakeCmdArgsLoop
+  if [%1]==[] goto :EndLoop
+
+  if not defined _yarnarguments (
+    set _yarnarguments=%1
+  ) else (
+    set _yarnarguments=!_yarnarguments! %1
+  )
+  shift
+  goto :MakeCmdArgsLoop
+  :EndLoop
+  set yarn-command-arguments=%_yarnarguments%
+  goto :eof
+
+:print_usage
+  @echo Usage: yarn [--config confdir] [--loglevel loglevel] COMMAND
+  @echo where COMMAND is one of:
+  @echo   resourcemanager      run the ResourceManager
+  @echo   nodemanager          run a nodemanager on each worker
+  @echo   router               run the Router daemon
+  @echo   globalpolicygenerator run the Global Policy Generator
+  @echo   routeradmin          router admin tools
+  @echo   timelineserver       run the timeline server
+  @echo   timelinereader       run the timeline reader server
+  @echo   rmadmin              admin tools
+  @echo   version              print the version
+  @echo   jar ^<jar^>            run a jar file
+  @echo   application          prints application(s) report/kill application
+  @echo   applicationattempt   prints applicationattempt(s) report
+  @echo   cluster              prints cluster information
+  @echo   container            prints container(s) report
+  @echo   node                 prints node report(s)
+  @echo   queue                prints queue information
+  @echo   logs                 dump container logs
+  @echo   schedulerconf        updates scheduler configuration
+  @echo   classpath            prints the class path needed to get the
+  @echo                        Hadoop jar and the required libraries
+  @echo   daemonlog            get/set the log level for each daemon
+  @echo  or
+  @echo   CLASSNAME            run the class named CLASSNAME
+  @echo Most commands print help when invoked w/o parameters.
+
+endlocal
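+
+@rem Illustrative invocations (IDs and paths are examples only):
+@rem   yarn resourcemanager
+@rem   yarn logs -applicationId application_1700000000000_0001
+@rem   yarn jar C:\apps\myapp.jar com.example.MyMain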