diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/exclude-nodes.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/exclude-nodes.xml
new file mode 100644
index 000000000..3f63b79f3
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/exclude-nodes.xml
@@ -0,0 +1,54 @@
+
+
+
+
+
+ content
+ yarn exclude-nodes template
+ This is the freemarker template for the yarn exclude-nodes file
+
+<#list exclude_hosts as host>
+${host}
+#list>
+#if>
+
+]]>
+
+
+
\ No newline at end of file
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml
new file mode 100644
index 000000000..a24ecd3f5
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml
@@ -0,0 +1,206 @@
+
+
+
+
+ yarn_log_dir_prefix
+ /var/log/hadoop-yarn
+ YARN Log Dir Prefix
+ YARN Log Dir Prefix
+
+
+ yarn_pid_dir_prefix
+ /var/run/hadoop-yarn
+ YARN PID Dir Prefix
+ YARN PID Dir Prefix
+
+
+ yarn_heapsize
+ 1024
+ YARN Java heap size
+ Max heapsize for all YARN components using a numerical value in the scale of MB
+
+
+ resourcemanager_heapsize
+ 1024
+ ResourceManager Java heap size
+ Max heapsize for ResourceManager using a numerical value in the scale of MB
+
+
+ nodemanager_heapsize
+ 1024
+ NodeManager Java heap size
+ Max heapsize for NodeManager using a numerical value in the scale of MB
+
+
+ min_user_id
+ 1000
+ Minimum user ID for submitting job
+ Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs
+
+
+ is_supported_yarn_ranger
+ false
+ Set to false by default, needs to be set to true in stacks that use Ranger Yarn Plugin
+
+
+ yarn_user_nofile_limit
+ 32768
+ Max open files limit setting for YARN user.
+
+
+ yarn_user_nproc_limit
+ 65536
+ Max number of processes limit setting for YARN user.
+
+
+
+ content
+ yarn-env template
+ This is the jinja template for yarn-env.sh file
+
+ export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+ USER="$(whoami)"
+ export HADOOP_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+ export HADOOP_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+ export JAVA_HOME={{java64_home}}
+ export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+
+ # User for YARN daemons
+ export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+ # resolve links - $0 may be a softlink
+ export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_YARN_HOME/etc/hadoop}"
+
+ # some Java parameters
+ # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+ if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+ fi
+
+ if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+ fi
+
+ JAVA=$JAVA_HOME/bin/java
+ JAVA_HEAP_MAX=-Xmx1000m
+
+ # For setting YARN specific HEAP sizes please use this
+ # Parameter and set appropriately
+ YARN_HEAPSIZE={{yarn_heapsize}}
+
+ # check envvars which might override default args
+ if [ "$YARN_HEAPSIZE" != "" ]; then
+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+ fi
+
+ # Resource Manager specific parameters
+
+ # Specify the max Heapsize for the ResourceManager using a numerical value
+ # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+ # the value to 1000.
+ # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
+ # and/or YARN_RESOURCEMANAGER_OPTS.
+ # If not specified, the default value will be picked from either YARN_HEAPMAX
+ # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+ export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+ # Specify the JVM options to be used when starting the ResourceManager.
+ # These options will be appended to the options specified as HADOOP_OPTS
+ # and therefore may override any similar flags set in HADOOP_OPTS
+ #export YARN_RESOURCEMANAGER_OPTS=
+
+ # Node Manager specific parameters
+
+ # Specify the max Heapsize for the NodeManager using a numerical value
+ # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+ # the value to 1000.
+ # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
+ # and/or YARN_NODEMANAGER_OPTS.
+ # If not specified, the default value will be picked from either YARN_HEAPMAX
+ # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+ export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+ # Specify the max Heapsize for the HistoryManager using a numerical value
+ # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+ # the value to 1024.
+ # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
+ # and/or YARN_HISTORYSERVER_OPTS.
+ # If not specified, the default value will be picked from either YARN_HEAPMAX
+ # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+ export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+ # Specify the JVM options to be used when starting the NodeManager.
+ # These options will be appended to the options specified as HADOOP_OPTS
+ # and therefore may override any similar flags set in HADOOP_OPTS
+ #export YARN_NODEMANAGER_OPTS=
+
+ # so that filenames w/ spaces are handled correctly in loops below
+ IFS=
+
+
+ # default log directory and file
+ if [ "$HADOOP_LOG_DIR" = "" ]; then
+ HADOOP_LOG_DIR="$HADOOP_YARN_HOME/logs"
+ fi
+ if [ "$HADOOP_LOGFILE" = "" ]; then
+ HADOOP_LOGFILE='yarn.log'
+ fi
+
+ # default policy file for service-level authorization
+ if [ "$YARN_POLICYFILE" = "" ]; then
+ YARN_POLICYFILE="hadoop-policy.xml"
+ fi
+
+ # restore ordinary behaviour
+ unset IFS
+
+
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+ HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.dir=$HADOOP_LOG_DIR"
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
+ HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.file=$HADOOP_LOGFILE"
+ HADOOP_OPTS="$HADOOP_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
+ HADOOP_OPTS="$HADOOP_OPTS -Dyarn.id.str=$HADOOP_IDENT_STRING"
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
+ HADOOP_OPTS="$HADOOP_OPTS -Dyarn.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
+ export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+ export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+ fi
+ HADOOP_OPTS="$HADOOP_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+ HADOOP_OPTS="$HADOOP_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+
+ {% if rm_security_opts is defined %}
+ HADOOP_OPTS="{{rm_security_opts}} $HADOOP_OPTS"
+ {% endif %}
+
+
+
+ service_check.queue.name
+ default
+
+ The queue that used by service check.
+
+
+
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml
new file mode 100644
index 000000000..600039b77
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml
@@ -0,0 +1,100 @@
+
+
+
+
+ yarn_rm_summary_log_max_backup_size
+ 256
+ The maximum size of backup file before the log is rotated
+ YARN Log: backup file size
+
+
+ yarn_rm_summary_log_number_of_backup_files
+ 20
+ The number of backup files
+ YARN Log: # of backup files
+
+
+ content
+ yarn-log4j template
+ Custom log4j.properties
+
+ #Relative to Yarn Log Dir Prefix
+ yarn.log.dir=.
+ #
+ # Job Summary Appender
+ #
+ # Use following logger to send summary to separate file defined by
+ # hadoop.mapreduce.jobsummary.log.file rolled daily:
+ # hadoop.mapreduce.jobsummary.logger=INFO,JSA
+ #
+ hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+ hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+ log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+ # Set the ResourceManager summary log filename
+ yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+ # Set the ResourceManager summary log level and appender
+ yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+ #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+ # To enable AppSummaryLogging for the RM,
+ # set yarn.server.resourcemanager.appsummary.logger to
+ # LEVEL,RMSUMMARY in hadoop-env.sh
+
+ # Appender for ResourceManager Application Summary Log
+ # Requires the following properties to be set
+ # - hadoop.log.dir (Hadoop Log directory)
+ # - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+ # - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+ log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+ log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+ log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB
+ log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}
+ log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+ log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+ log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+ log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+ log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+ log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+ log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+
+ # Audit logging for ResourceManager
+ rm.audit.logger=${hadoop.root.logger}
+ log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+ log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+ log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+ log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+ log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+ log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+ log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+
+ # Audit logging for NodeManager
+ nm.audit.logger=${hadoop.root.logger}
+ log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+ log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+ log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+ log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+ log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+ log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+ log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+
+
+
+
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-site.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-site.xml
new file mode 100644
index 000000000..f7d5dd3f5
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-site.xml
@@ -0,0 +1,476 @@
+
+
+
+
+
+ yarn.resourcemanager.hostname
+ localhost
+ The hostname of the RM.
+
+
+ yarn.resourcemanager.resource-tracker.address
+ localhost:8025
+ The address of ResourceManager.
+
+
+ yarn.resourcemanager.scheduler.address
+ localhost:8030
+ The address of the scheduler interface.
+
+
+ yarn.resourcemanager.address
+ localhost:8050
+
+ The address of the applications manager interface in the
+ RM.
+
+
+
+ yarn.resourcemanager.admin.address
+ localhost:8141
+ The address of the RM admin interface.
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+ The class to use as the resource scheduler.
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 512
+
+ The minimum allocation for every container request at the RM,
+ in MBs. Memory requests lower than this won't take effect,
+ and the specified value will get allocated at minimum.
+
+ Minimum Container Size (Memory)
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 5120
+
+ The maximum allocation for every container request at the RM,
+ in MBs. Memory requests higher than this won't take effect,
+ and will get capped to this value.
+
+ Maximum Container Size (Memory)
+
+
+ yarn.acl.enable
+ false
+ Are acls enabled.
+
+
+ yarn.admin.acl
+
+ ACL of who can be admin of the YARN cluster.
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+ The address of the container manager in the NM.
+
+
+ yarn.nodemanager.resource.memory-mb
+ 5120
+ Amount of physical memory, in MB, that can be allocated
+ for containers.
+ Memory allocated for all YARN containers on a node
+
+
+ yarn.application.classpath
+
+ {{hadoop_conf_dir}},{{hadoop_home}}/*,{{hadoop_home}}/lib/*,{{hadoop_hdfs_home}}/*,{{hadoop_hdfs_home}}/lib/*,{{hadoop_yarn_home}}/*,{{hadoop_yarn_home}}/lib/*
+ Classpath for typical applications.
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+ Ratio between virtual memory to physical memory when
+ setting memory limits for containers. Container allocations are
+ expressed in terms of physical memory, and virtual memory usage
+ is allowed to exceed this allocation by this ratio.
+
+ Virtual Memory Ratio
+
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor
+ ContainerExecutor for launching containers
+
+
+ yarn.nodemanager.linux-container-executor.group
+ hadoop
+ Unix group of the NodeManager
+
+
+ yarn.nodemanager.aux-services
+ mapreduce_shuffle
+ Auxilliary services of NodeManager. A valid service name should only contain
+ a-zA-Z0-9_ and can
+ not start with numbers
+
+
+ yarn.nodemanager.aux-services.mapreduce_shuffle.class
+ org.apache.hadoop.mapred.ShuffleHandler
+ The auxiliary service class to use
+
+
+ yarn.nodemanager.log-dirs
+ YARN NodeManager Log directories
+ /hadoop/yarn/log
+
+ Where to store container logs. An application's localized log directory
+ will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+ Individual containers' log directories will be below this, in directories
+ named container_{$contid}. Each container directory will contain the files
+ stderr, stdin, and syslog generated by that container.
+
+
+
+ yarn.nodemanager.local-dirs
+ YARN NodeManager Local directories
+ /hadoop/yarn/local
+
+ List of directories to store localized files in. An
+ application's localized file directory will be found in:
+ ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+ Individual containers' work directories, called container_${contid}, will
+ be subdirectories of this.
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+
+ The interval, in milliseconds, for which the node manager
+ waits between two cycles of monitoring its containers' memory usage.
+
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+ Frequency of running node health script.
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+ Script time out period.
+
+
+ yarn.nodemanager.log.retain-seconds
+ 604800
+
+ Time in seconds to retain user logs. Only applicable if
+ log aggregation is disabled.
+
+
+
+ yarn.log-aggregation-enable
+ true
+ Whether to enable log aggregation.
+ Enable Log Aggregation
+
+
+ yarn.nodemanager.remote-app-log-dir
+ YARN NodeManager Remote App Log directory
+ /app-logs
+ Location to aggregate logs to.
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+ The remote log dir will be created at
+ {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+ T-file compression types used to compress aggregated logs.
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 0
+
+ Number of seconds after an application finishes before the nodemanager's
+ DeletionService will delete the application's localized file directory
+ and log directory.
+
+ To diagnose Yarn application problems, set this property's value large
+ enough (for example, to 600 = 10 minutes) to permit examination of these
+ directories. After changing the property's value, you must restart the
+ nodemanager in order for it to have an effect.
+
+ The roots of Yarn applications' work directories is configurable with
+ the yarn.nodemanager.local-dirs property (see below), and the roots
+ of the Yarn applications' log directories is configurable with the
+ yarn.nodemanager.log-dirs property (see also below).
+
+
+
+ yarn.log-aggregation.retain-seconds
+ 2592000
+
+ How long to keep aggregation logs before deleting them. -1 disables.
+ Be careful set this too small and you will spam the name node.
+
+
+
+ yarn.nodemanager.admin-env
+ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
+
+ Environment variables that should be forwarded from the NodeManager's
+ environment to the container's.
+
+
+
+ yarn.nodemanager.disk-health-checker.min-healthy-disks
+ 0.25
+
+ The minimum fraction of number of disks to be healthy for the nodemanager
+ to launch new containers. This correspond to both
+ yarn-nodemanager.local-dirs and yarn.nodemanager.log-dirs. i.e.
+ If there are less number of healthy local-dirs (or log-dirs) available,
+ then new containers will not be launched on this node.
+
+
+
+ yarn.resourcemanager.am.max-attempts
+ 2
+
+ The maximum number of application attempts. It's a global
+ setting for all application masters. Each application master can specify
+ its individual maximum number of application attempts via the API, but the
+ individual number cannot be more than the global upper bound. If it is,
+ the resourcemanager will override it. The default number is set to 2, to
+ allow at least one retry for AM.
+
+
+
+ yarn.resourcemanager.webapp.address
+ localhost:8088
+
+ The address of the RM web application.
+
+
+
+ yarn.resourcemanager.webapp.https.address
+ localhost:8090
+
+ The https address of the RM web application.
+
+
+
+ yarn.nodemanager.vmem-check-enabled
+ false
+
+ Whether virtual memory limits will be enforced for containers.
+
+
+
+ yarn.log.server.url
+ http://localhost:19888/jobhistory/logs
+
+ URI for the HistoryServer's log resource
+
+
+
+ yarn.resourcemanager.nodes.exclude-path
+ /etc/hadoop/conf/yarn.exclude
+
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the resource manager. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+
+
+
+ manage.include.files
+ false
+ If true Bigtop Manager will manage include file if
+ yarn.resourcemanager.nodes.include-path is configured.
+
+
+ yarn.http.policy
+ HTTP_ONLY
+
+ This configures the HTTP endpoint for Yarn Daemons.The following values are supported: -
+ HTTP_ONLY : Service is provided only on http - HTTPS_ONLY : Service is provided only on
+ https
+
+
+
+ yarn.timeline-service.enabled
+ true
+ Indicate to clients whether timeline service is enabled or not.
+ If enabled, clients will put entities and events to the timeline server.
+
+
+
+ yarn.timeline-service.generic-application-history.store-class
+ org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
+
+ Store class name for history store, defaulting to file system store
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.path
+ /var/log/hadoop-yarn/timeline
+
+ Store file name for leveldb timeline store
+
+
+
+ yarn.timeline-service.webapp.address
+ localhost:8188
+
+ The http address of the timeline service web application.
+
+
+
+ yarn.timeline-service.webapp.https.address
+ localhost:8190
+
+ The http address of the timeline service web application.
+
+
+
+ yarn.timeline-service.address
+ localhost:10200
+
+ This is default address for the timeline server to start
+ the RPC server.
+
+
+
+ Enable age off of timeline store data.
+ yarn.timeline-service.ttl-enable
+ true
+
+
+ Time to live for timeline store data in milliseconds.
+ yarn.timeline-service.ttl-ms
+ 2678400000
+
+
+ Length of time to wait between deletion cycles of leveldb timeline store in
+ milliseconds.
+ yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms
+ 300000
+
+
+ yarn.timeline-service.recovery.enabled
+
+ Enable timeline server to recover state after starting. If
+ true, then yarn.timeline-service.state-store-class must be specified.
+
+ true
+
+
+ yarn.acl.enable
+ false
+ Are acls enabled.
+
+
+ yarn.authorization-provider
+ Yarn authorization provider class.
+
+
+ yarn.admin.acl
+ yarn
+ ACL of who can be admin of the YARN cluster.
+
+
+
+ yarn.timeline-service.store-class
+ org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore
+ Main storage class for YARN timeline server.
+
+
+ yarn.timeline-service.entity-group-fs-store.active-dir
+ /ats/active/
+ DFS path to store active application’s timeline data
+
+
+ yarn.timeline-service.entity-group-fs-store.done-dir
+ /ats/done/
+ DFS path to store done application’s timeline data
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
+
+ Plugins that can translate a timeline entity read request into a list of
+ timeline cache ids, separated by commas.
+
+
+
+ yarn.timeline-service.entity-group-fs-store.summary-store
+ Summary storage for ATS v1.5
+
+ org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
+
+
+ yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
+
+ Scan interval for ATS v1.5 entity group file system storage reader.This
+ value controls how frequent the reader will scan the HDFS active directory
+ for application status.
+
+
+ 60
+
+
+ yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
+
+ Scan interval for ATS v1.5 entity group file system storage cleaner.This
+ value controls how frequent the reader will scan the HDFS done directory
+ for stale application data.
+
+
+ 3600
+
+
+ yarn.timeline-service.entity-group-fs-store.retain-seconds
+
+ How long the ATS v1.5 entity group file system storage will keep an
+ application's data in the done directory.
+
+
+ 604800
+
+
+ yarn.log.server.web-service.url
+ http://localhost:8188/ws/v1/applicationhistory
+ Log Server Web Service URL.
+
+
\ No newline at end of file
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn.conf.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn.conf.xml
new file mode 100644
index 000000000..84f2d3fa7
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn.conf.xml
@@ -0,0 +1,51 @@
+
+
+
+
+
+ content
+ yarn.conf template
+ This is the freemarker template for yarn file
+
+
+
+
\ No newline at end of file
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/metainfo.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/metainfo.xml
new file mode 100644
index 000000000..478393b07
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/metainfo.xml
@@ -0,0 +1,107 @@
+
+
+
+
+ 2.0
+
+ yarn
+ YARN
+ Apache Hadoop NextGen MapReduce (YARN)
+ 3.3.6
+ yarn
+ yarn
+
+
+
+ resourcemanager
+ ResourceManager
+ master
+ 1-2
+
+ org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn.ResourceManagerScript
+ java
+ 1200
+
+
+
+ decommission
+
+ org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn.ResourceManagerScript
+ java
+ 600
+
+
+
+ refreshqueues
+
+ org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn.ResourceManagerScript
+ java
+ 600
+
+
+
+
+
+ nodemanager
+ NodeManager
+ slave
+ 1+
+
+ org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn.NodeManagerScript
+ java
+ 1200
+
+
+
+ yarn_client
+ YARN Client
+ client
+ 1+
+
+ org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn.YarnClientScript
+ java
+ 1200
+
+
+
+
+
+
+
+ centos7
+ rocky8
+
+
+ x86_64
+
+
+ hadoop_3_3_0-yarn
+ hadoop_3_3_0-hdfs
+
+
+
+
+
+ zookeeper
+ hdfs
+
+
+
+
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/order.json b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/order.json
new file mode 100644
index 000000000..9c9addb58
--- /dev/null
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/order.json
@@ -0,0 +1,18 @@
+{
+ "RESOURCEMANAGER-START": [
+ "ZOOKEEPER_SERVER-START",
+ "NAMENODE-START",
+ "DATANODE-START"
+ ],
+ "NODEMANAGER-START": [
+ "NAMENODE-START",
+ "DATANODE-START",
+ "RESOURCEMANAGER-START"
+ ],
+ "RESOURCEMANAGER-RESTART": [
+ "NAMENODE-RESTART"
+ ],
+ "NODEMANAGER-RESTART": [
+ "NAMENODE-RESTART"
+ ]
+}
\ No newline at end of file
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java
index bc782f70e..4d4aee235 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java
@@ -116,11 +116,11 @@ public Map hadoopEnv() {
hadoopLogDir = (String) hadoopEnv.get("hadoop_log_dir_prefix");
hadoopPidDir = (String) hadoopEnv.get("hadoop_pid_dir_prefix");
- nameNodePidFile = MessageFormat.format("{0}/hadoop-{1}-namenode.pid", hadoopPidDir, user());
- dataNodePidFile = MessageFormat.format("{0}/hadoop-{1}-datanode.pid", hadoopPidDir, user());
- sNameNodePidFile = MessageFormat.format("{0}/hadoop-{1}-secondarynamenode.pid", hadoopPidDir, user());
- journalNodePidFile = MessageFormat.format("{0}/hadoop-{1}-journalnode.pid", hadoopPidDir, user());
- zkfcPidFile = MessageFormat.format("{0}/hadoop-{1}-zkfc.pid", hadoopPidDir, user());
+ nameNodePidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-namenode.pid", hadoopPidDir, user());
+ dataNodePidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-datanode.pid", hadoopPidDir, user());
+ sNameNodePidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-secondarynamenode.pid", hadoopPidDir, user());
+ journalNodePidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-journalnode.pid", hadoopPidDir, user());
+ zkfcPidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-zkfc.pid", hadoopPidDir, user());
return hadoopEnv;
}
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/NodeManagerScript.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/NodeManagerScript.java
new file mode 100644
index 000000000..3348c1b4a
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/NodeManagerScript.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn;
+
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.spi.stack.Params;
+import org.apache.bigtop.manager.spi.stack.Script;
+import org.apache.bigtop.manager.stack.common.exception.StackException;
+import org.apache.bigtop.manager.stack.common.utils.PackageUtils;
+import org.apache.bigtop.manager.stack.common.utils.linux.LinuxOSUtils;
+
+import com.google.auto.service.AutoService;
+import lombok.extern.slf4j.Slf4j;
+
+import java.text.MessageFormat;
+
+@Slf4j
+@AutoService(Script.class)
+public class NodeManagerScript implements Script { // Lifecycle script (install/configure/start/stop/status) for the YARN NodeManager component.
+
+ @Override
+ public ShellResult install(Params params) { // Install the OS packages listed in the stack metainfo for this component.
+ return PackageUtils.install(params.getPackageList());
+ }
+
+ @Override
+ public ShellResult configure(Params params) { // Write the YARN configuration for the "nodemanager" role.
+ return YarnSetup.config(params, "nodemanager");
+ }
+
+ @Override
+ public ShellResult start(Params params) {
+ configure(params); // Re-apply configuration on every start so config drift is corrected before launch.
+ YarnParams yarnParams = (YarnParams) params;
+
+ String cmd = MessageFormat.format("{0} --daemon start nodemanager", yarnParams.yarnExec()); // yarnExec() presumably resolves the yarn launcher path — confirm in YarnParams.
+ try {
+ return LinuxOSUtils.sudoExecCmd(cmd, yarnParams.user()); // Run as the yarn service user, not root.
+ } catch (Exception e) {
+ throw new StackException(e); // Wrap any shell failure in the stack's runtime exception.
+ }
+ }
+
+ @Override
+ public ShellResult stop(Params params) { // Stop the daemon; unlike start, no configure() is needed here.
+ YarnParams yarnParams = (YarnParams) params;
+ String cmd = MessageFormat.format("{0} --daemon stop nodemanager", yarnParams.yarnExec());
+ try {
+ return LinuxOSUtils.sudoExecCmd(cmd, yarnParams.user());
+ } catch (Exception e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult status(Params params) { // Liveness check based on the NodeManager pid file.
+ YarnParams yarnParams = (YarnParams) params;
+ return LinuxOSUtils.checkProcess(yarnParams.getNodeManagerPidFile());
+ }
+}
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/ResourceManagerScript.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/ResourceManagerScript.java
new file mode 100644
index 000000000..15637608f
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/ResourceManagerScript.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn;
+
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.spi.stack.Params;
+import org.apache.bigtop.manager.spi.stack.Script;
+import org.apache.bigtop.manager.stack.common.exception.StackException;
+import org.apache.bigtop.manager.stack.common.utils.PackageUtils;
+import org.apache.bigtop.manager.stack.common.utils.linux.LinuxOSUtils;
+
+import com.google.auto.service.AutoService;
+import lombok.extern.slf4j.Slf4j;
+
+import java.text.MessageFormat;
+
+@Slf4j
+@AutoService(Script.class)
+public class ResourceManagerScript implements Script {
+
+ @Override
+ public ShellResult install(Params params) {
+ return PackageUtils.install(params.getPackageList());
+ }
+
+ @Override
+ public ShellResult configure(Params params) {
+ return YarnSetup.config(params, "resourcemanager");
+ }
+
+ @Override
+ public ShellResult start(Params params) {
+ configure(params);
+ YarnParams yarnParams = (YarnParams) params;
+
+ String cmd = MessageFormat.format("{0} --daemon start resourcemanager", yarnParams.yarnExec());
+ try {
+ return LinuxOSUtils.sudoExecCmd(cmd, yarnParams.user());
+ } catch (Exception e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult stop(Params params) {
+ YarnParams yarnParams = (YarnParams) params;
+ String cmd = MessageFormat.format("{0} --daemon stop resourcemanager", yarnParams.yarnExec());
+ try {
+ return LinuxOSUtils.sudoExecCmd(cmd, yarnParams.user());
+ } catch (Exception e) {
+ throw new StackException(e);
+ }
+ }
+
+ @Override
+ public ShellResult status(Params params) {
+ YarnParams yarnParams = (YarnParams) params;
+ return LinuxOSUtils.checkProcess(yarnParams.getResourceManagerPidFile());
+ }
+
+}
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnClientScript.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnClientScript.java
new file mode 100644
index 000000000..31c38542f
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnClientScript.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn;
+
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.spi.stack.ClientScript;
+import org.apache.bigtop.manager.spi.stack.Params;
+import org.apache.bigtop.manager.spi.stack.Script;
+import org.apache.bigtop.manager.stack.common.utils.PackageUtils;
+
+import com.google.auto.service.AutoService;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@AutoService(Script.class)
+public class YarnClientScript implements ClientScript {
+
+ @Override
+ public ShellResult install(Params params) {
+ return PackageUtils.install(params.getPackageList());
+ }
+
+ @Override
+ public ShellResult configure(Params params) {
+ return YarnSetup.config(params);
+ }
+}
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java
new file mode 100644
index 000000000..707b52fb3
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn;
+
+import org.apache.bigtop.manager.common.message.entity.payload.CommandPayload;
+import org.apache.bigtop.manager.stack.common.annotations.GlobalParams;
+import org.apache.bigtop.manager.stack.common.utils.BaseParams;
+import org.apache.bigtop.manager.stack.common.utils.LocalSettings;
+
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+import java.text.MessageFormat;
+import java.util.List;
+import java.util.Map;
+
+@Getter
+@Slf4j
+public class YarnParams extends BaseParams {
+
+ private String yarnLogDir = "/var/log/hadoop-yarn";
+ private String yarnPidDir = "/var/run/hadoop-yarn";
+ private String rmNodesExcludeDir = "/etc/hadoop/conf/yarn.exclude";
+ /* pid file */
+ private String resourceManagerPidFile = yarnPidDir + "/yarn/hadoop-yarn-resourcemanager.pid";
+ private String nodeManagerPidFile = yarnPidDir + "/yarn/hadoop-yarn-nodemanager.pid";
+ /* pid file */
+ private List excludeHosts = List.of();
+
+ public YarnParams(CommandPayload commandPayload) {
+ super(commandPayload);
+ globalParamsMap.put("yarn_user", user());
+ globalParamsMap.put("yarn_group", group());
+ globalParamsMap.put("java_home", "/usr/local/java");
+ globalParamsMap.put("hadoop_home", serviceHome());
+ globalParamsMap.put("hadoop_hdfs_home", hdfsHome());
+ globalParamsMap.put("hadoop_conf_dir", confDir());
+
+ globalParamsMap.put("exclude_hosts", excludeHosts);
+ }
+
+ public String yarnLimits() {
+ Map yarnConf = LocalSettings.configurations(serviceName(), "yarn.conf");
+ return (String) yarnConf.get("content");
+ }
+
+ public String excludeNodesContent() {
+ Map excludeNodesContent = LocalSettings.configurations(serviceName(), "exclude-nodes.xml");
+ return (String) excludeNodesContent.get("content");
+ }
+
+ @GlobalParams
+ public Map yarnLog4j() {
+ return LocalSettings.configurations(serviceName(), "yarn-log4j");
+ }
+
+ @GlobalParams
+ public Map yarnSite() {
+ Map yarnSite = LocalSettings.configurations(serviceName(), "yarn-site");
+ rmNodesExcludeDir = (String) yarnSite.get("yarn.resourcemanager.nodes.exclude-path");
+
+ return yarnSite;
+ }
+
+ @GlobalParams
+ public Map yarnEnv() {
+ Map yarnEnv = LocalSettings.configurations(serviceName(), "yarn-env");
+
+ yarnLogDir = (String) yarnEnv.get("yarn_log_dir_prefix");
+ yarnPidDir = (String) yarnEnv.get("yarn_pid_dir_prefix");
+ resourceManagerPidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-resourcemanager.pid", yarnPidDir, user());
+ nodeManagerPidFile = MessageFormat.format("{0}/{1}/hadoop-{1}-nodemanager.pid", yarnPidDir, user());
+ return yarnEnv;
+ }
+
+ @Override
+ public String confDir() {
+ return "/etc/hadoop/conf";
+ }
+
+ @Override
+ public String serviceHome() {
+ return stackLibDir() + "/hadoop";
+ }
+
+ public String hdfsHome() {
+ return stackLibDir() + "/hadoop-hdfs";
+ }
+
+ public String yarnExec() {
+ return stackBinDir() + "/yarn";
+ }
+
+ public String yarnHome() {
+ return stackBinDir() + "/hadoop-yarn";
+ }
+}
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java
new file mode 100644
index 000000000..d76ad9d06
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.yarn;
+
+import org.apache.bigtop.manager.common.constants.Constants;
+import org.apache.bigtop.manager.common.shell.ShellResult;
+import org.apache.bigtop.manager.spi.stack.Params;
+import org.apache.bigtop.manager.stack.common.enums.ConfigType;
+import org.apache.bigtop.manager.stack.common.utils.BaseParams;
+import org.apache.bigtop.manager.stack.common.utils.linux.LinuxFileUtils;
+
+import org.apache.commons.lang3.StringUtils;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+
+import java.text.MessageFormat;
+import java.util.Map;
+
+@Slf4j
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class YarnSetup {
+
+ public static ShellResult config(Params params) {
+ return config(params, null);
+ }
+
+ public static ShellResult config(Params params, String componentName) {
+ log.info("starting YARN config");
+ YarnParams yarnParams = (YarnParams) params;
+
+ String confDir = yarnParams.confDir();
+ String yarnUser = yarnParams.user();
+ String yarnGroup = yarnParams.group();
+ Map yarnEnv = yarnParams.yarnEnv();
+
+ if (StringUtils.isNotBlank(componentName)) {
+ switch (componentName) {
+ case "resourcemanager": {
+ LinuxFileUtils.createDirectories(
+ yarnParams.getRmNodesExcludeDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
+
+ LinuxFileUtils.toFileByTemplate(
+ yarnParams.excludeNodesContent(),
+ yarnParams.getRmNodesExcludeDir(),
+ yarnUser,
+ yarnGroup,
+ Constants.PERMISSION_644,
+ yarnParams.getGlobalParamsMap());
+ }
+ }
+ }
+
+ // mkdir directories
+ LinuxFileUtils.createDirectories(
+ yarnParams.getYarnLogDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
+ LinuxFileUtils.createDirectories(
+ yarnParams.getYarnPidDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
+
+ // hdfs.limits
+ LinuxFileUtils.toFileByTemplate(
+ yarnParams.yarnLimits(),
+ MessageFormat.format("{0}/yarn.conf", BaseParams.LIMITS_CONF_DIR),
+ Constants.ROOT_USER,
+ Constants.ROOT_USER,
+ Constants.PERMISSION_644,
+ yarnParams.getGlobalParamsMap());
+
+ // hadoop-env.sh
+ LinuxFileUtils.toFileByTemplate(
+ yarnEnv.get("content").toString(),
+ MessageFormat.format("{0}/yarn-env.sh", confDir),
+ yarnUser,
+ yarnGroup,
+ Constants.PERMISSION_644,
+ yarnParams.getGlobalParamsMap());
+
+ // hdfs-site.xml
+ LinuxFileUtils.toFile(
+ ConfigType.XML,
+ MessageFormat.format("{0}/yarn-site.xml", confDir),
+ yarnUser,
+ yarnGroup,
+ Constants.PERMISSION_644,
+ yarnParams.yarnSite());
+
+
+ // log4j
+ LinuxFileUtils.toFileByTemplate(
+ yarnParams.yarnLog4j().get("content").toString(),
+ MessageFormat.format("{0}/yarnservice-log4j.properties", confDir),
+ yarnUser,
+ yarnGroup,
+ Constants.PERMISSION_644,
+ yarnParams.getGlobalParamsMap());
+
+ return ShellResult.success("YARN Configure success!");
+ }
+
+}