Commit MetaInfo

Revision: 579 (tree)
Time: 2013-11-16 22:54:43
Author: whitestar

Log Message

add default configuration files of the ver. 5.0.0
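
The new default.xml below records every effective property as a <name>/<value>/<source> triple. Entries whose source reads "programatically" suggest the dump mirrors a running daemon's live Configuration (each Hadoop daemon serves this same XML at its HTTP /conf endpoint, e.g. http://localhost:8088/conf on the ResourceManager); how these files were actually generated is not recorded in the commit. As a hedged illustration only, an equivalent dump can be written with Hadoop's Configuration.writeXml:

import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch (not necessarily how this commit's files were made):
// dump the effective default + site configuration as <property> XML,
// one <name>/<value>/<source> entry per key, like default.xml below.
public class DumpEffectiveConf {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // loads core-default.xml and core-site.xml
    // Also pull in the MapReduce and YARN resources (assumed to be on the classpath).
    conf.addResource("mapred-default.xml");
    conf.addResource("mapred-site.xml");
    conf.addResource("yarn-default.xml");
    conf.addResource("yarn-site.xml");
    try (OutputStream out = new FileOutputStream("default.xml")) {
      conf.writeXml(out);
    }
  }
}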

Change Summary

Incremental Difference

--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/default.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/default.xml (revision 579)
@@ -0,0 +1,384 @@
1+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<configuration>
4+<property><name>mapreduce.job.ubertask.enable</name><value>false</value><source>mapred-default.xml</source></property>
5+<property><name>yarn.resourcemanager.max-completed-applications</name><value>10000</value><source>yarn-default.xml</source></property>
6+<property><name>yarn.resourcemanager.delayed.delegation-token.removal-interval-ms</name><value>30000</value><source>yarn-default.xml</source></property>
7+<property><name>io.bytes.per.checksum</name><value>512</value><source>core-default.xml</source></property>
8+<property><name>mapreduce.client.submit.file.replication</name><value>10</value><source>mapred-default.xml</source></property>
9+<property><name>mapreduce.jobhistory.cleaner.interval-ms</name><value>86400000</value><source>mapred-default.xml</source></property>
10+<property><name>yarn.nodemanager.container-manager.thread-count</name><value>20</value><source>yarn-default.xml</source></property>
11+<property><name>yarn.nodemanager.pmem-check-enabled</name><value>true</value><source>yarn-default.xml</source></property>
12+<property><name>mapreduce.jobhistory.done-dir</name><value>${yarn.app.mapreduce.am.staging-dir}/history/done</value><source>mapred-default.xml</source></property>
13+<property><name>mapreduce.tasktracker.healthchecker.interval</name><value>60000</value><source>mapred-default.xml</source></property>
14+<property><name>mapreduce.jobtracker.staging.root.dir</name><value>${hadoop.tmp.dir}/mapred/staging</value><source>mapred-default.xml</source></property>
15+<property><name>yarn.resourcemanager.recovery.enabled</name><value>false</value><source>yarn-default.xml</source></property>
16+<property><name>yarn.resourcemanager.ha.admin.client.thread-count</name><value>1</value><source>yarn-default.xml</source></property>
17+<property><name>fs.AbstractFileSystem.file.impl</name><value>org.apache.hadoop.fs.local.LocalFs</value><source>core-default.xml</source></property>
18+<property><name>mapreduce.client.completion.pollinterval</name><value>5000</value><source>mapred-default.xml</source></property>
19+<property><name>mapreduce.jobhistory.client.thread-count</name><value>10</value><source>mapred-default.xml</source></property>
20+<property><name>mapreduce.job.ubertask.maxreduces</name><value>1</value><source>mapred-default.xml</source></property>
21+<property><name>mapreduce.reduce.shuffle.memory.limit.percent</name><value>0.25</value><source>mapred-default.xml</source></property>
22+<property><name>yarn.client.max-nodemanagers-proxies</name><value>500</value><source>yarn-default.xml</source></property>
23+<property><name>hadoop.ssl.keystores.factory.class</name><value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value><source>core-default.xml</source></property>
24+<property><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value><source>core-default.xml</source></property>
25+<property><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value><source>yarn-default.xml</source></property>
26+<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value><source>core-default.xml</source></property>
27+<property><name>s3.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
28+<property><name>mapreduce.task.io.sort.factor</name><value>10</value><source>mapred-default.xml</source></property>
29+<property><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value><source>yarn-default.xml</source></property>
30+<property><name>yarn.admin.acl</name><value>*</value><source>yarn-default.xml</source></property>
31+<property><name>mapreduce.job.speculative.speculativecap</name><value>0.1</value><source>mapred-default.xml</source></property>
32+<property><name>yarn.nodemanager.resource.memory-mb</name><value>8192</value><source>yarn-default.xml</source></property>
33+<property><name>io.map.index.interval</name><value>128</value><source>core-default.xml</source></property>
34+<property><name>nfs3.mountd.port</name><value>4242</value><source>core-default.xml</source></property>
35+<property><name>s3.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
36+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>${hadoop.tmp.dir}/yarn/system/rmstore</value><source>yarn-default.xml</source></property>
37+<property><name>mapreduce.task.files.preserve.failedtasks</name><value>false</value><source>mapred-default.xml</source></property>
38+<property><name>ha.zookeeper.session-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
39+<property><name>s3.replication</name><value>3</value><source>core-default.xml</source></property>
40+<property><name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name><value>org.apache.hadoop.mapred.ShuffleHandler</value><source>yarn-site.xml</source></property>
41+<property><name>mapreduce.reduce.shuffle.connect.timeout</name><value>180000</value><source>mapred-default.xml</source></property>
42+<property><name>hadoop.ssl.enabled</name><value>false</value><source>core-default.xml</source></property>
43+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value><source>yarn-site.xml</source></property>
44+<property><name>mapreduce.job.counters.max</name><value>120</value><source>mapred-default.xml</source></property>
45+<property><name>ipc.client.connect.max.retries.on.timeouts</name><value>45</value><source>core-default.xml</source></property>
46+<property><name>mapreduce.job.complete.cancel.delegation.tokens</name><value>true</value><source>mapred-default.xml</source></property>
47+<property><name>yarn.resourcemanager.ha.fencer</name><value>org.apache.hadoop.yarn.server.resourcemanager.ZKStoreNodeFencer </value><source>yarn-default.xml</source></property>
48+<property><name>fs.trash.interval</name><value>0</value><source>core-default.xml</source></property>
49+<property><name>yarn.resourcemanager.admin.address</name><value>localhost:8033</value><source>programatically</source></property>
50+<property><name>ha.health-monitor.check-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
51+<property><name>hadoop.jetty.logs.serve.aliases</name><value>true</value><source>core-default.xml</source></property>
52+<property><name>hadoop.http.authentication.kerberos.principal</name><value>HTTP/_HOST@LOCALHOST</value><source>core-default.xml</source></property>
53+<property><name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name><value>5000</value><source>mapred-default.xml</source></property>
54+<property><name>mapreduce.job.reduce.shuffle.consumer.plugin.class</name><value>org.apache.hadoop.mapreduce.task.reduce.Shuffle</value><source>mapred-default.xml</source></property>
55+<property><name>s3native.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
56+<property><name>ha.health-monitor.sleep-after-disconnect.ms</name><value>1000</value><source>core-default.xml</source></property>
57+<property><name>yarn.resourcemanager.nodemanagers.heartbeat-interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
58+<property><name>yarn.log-aggregation.retain-check-interval-seconds</name><value>-1</value><source>yarn-default.xml</source></property>
59+<property><name>mapreduce.jobtracker.jobhistory.task.numberprogresssplits</name><value>12</value><source>mapred-default.xml</source></property>
60+<property><name>mapreduce.map.cpu.vcores</name><value>1</value><source>mapred-default.xml</source></property>
61+<property><name>yarn.acl.enable</name><value>true</value><source>yarn-default.xml</source></property>
62+<property><name>hadoop.security.instrumentation.requires.admin</name><value>false</value><source>core-default.xml</source></property>
63+<property><name>yarn.nodemanager.localizer.fetch.thread-count</name><value>4</value><source>yarn-default.xml</source></property>
64+<property><name>hadoop.security.authorization</name><value>false</value><source>core-default.xml</source></property>
65+<property><name>hadoop.security.group.mapping.ldap.search.filter.group</name><value>(objectClass=group)</value><source>core-default.xml</source></property>
66+<property><name>rpc.engine.org.apache.hadoop.yarn.server.api.ResourceTrackerPB</name><value>org.apache.hadoop.ipc.ProtobufRpcEngine</value><source>programatically</source></property>
67+<property><name>mapreduce.output.fileoutputformat.compress.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value><source>mapred-default.xml</source></property>
68+<property><name>mapreduce.shuffle.max.connections</name><value>0</value><source>mapred-default.xml</source></property>
69+<property><name>mapreduce.shuffle.port</name><value>13562</value><source>mapred-default.xml</source></property>
70+<property><name>mapreduce.reduce.log.level</name><value>INFO</value><source>mapred-default.xml</source></property>
71+<property><name>yarn.log-aggregation-enable</name><value>true</value><source>yarn-site.xml</source></property>
72+<property><name>mapreduce.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value><source>mapred-default.xml</source></property>
73+<property><name>hadoop.security.group.mapping.ldap.search.attr.group.name</name><value>cn</value><source>core-default.xml</source></property>
74+<property><name>fs.client.resolve.remote.symlinks</name><value>true</value><source>core-default.xml</source></property>
75+<property><name>s3native.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
76+<property><name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name><value>5000</value><source>mapred-default.xml</source></property>
77+<property><name>tfile.fs.output.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
78+<property><name>yarn.nodemanager.local-dirs</name><value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value><source>yarn-site.xml</source></property>
79+<property><name>mapreduce.jobtracker.persist.jobstatus.active</name><value>true</value><source>mapred-default.xml</source></property>
80+<property><name>fs.AbstractFileSystem.hdfs.impl</name><value>org.apache.hadoop.fs.Hdfs</value><source>core-default.xml</source></property>
81+<property><name>mapreduce.job.map.output.collector.class</name><value>org.apache.hadoop.mapred.MapTask$MapOutputBuffer</value><source>mapred-default.xml</source></property>
82+<property><name>mapreduce.tasktracker.local.dir.minspacestart</name><value>0</value><source>mapred-default.xml</source></property>
83+<property><name>hadoop.security.uid.cache.secs</name><value>14400</value><source>core-default.xml</source></property>
84+<property><name>yarn.resourcemanager.scheduler.monitor.policies</name><value>org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy</value><source>yarn-default.xml</source></property>
85+<property><name>hadoop.ssl.client.conf</name><value>ssl-client.xml</value><source>core-default.xml</source></property>
86+<property><name>mapreduce.tasktracker.local.dir.minspacekill</name><value>0</value><source>mapred-default.xml</source></property>
87+<property><name>mapreduce.jobtracker.retiredjobs.cache.size</name><value>1000</value><source>mapred-default.xml</source></property>
88+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value><source>yarn-default.xml</source></property>
89+<property><name>mapreduce.job.reduce.slowstart.completedmaps</name><value>0.05</value><source>mapred-default.xml</source></property>
90+<property><name>mapreduce.job.end-notification.retry.attempts</name><value>0</value><source>mapred-default.xml</source></property>
91+<property><name>mapreduce.tasktracker.outofband.heartbeat</name><value>false</value><source>mapred-default.xml</source></property>
92+<property><name>io.native.lib.available</name><value>true</value><source>core-default.xml</source></property>
93+<property><name>mapreduce.jobtracker.persist.jobstatus.hours</name><value>1</value><source>mapred-default.xml</source></property>
94+<property><name>yarn.resourcemanager.ha.automatic-failover.enabled</name><value>false</value><source>yarn-default.xml</source></property>
95+<property><name>mapreduce.client.progressmonitor.pollinterval</name><value>1000</value><source>mapred-default.xml</source></property>
96+<property><name>mapreduce.reduce.input.buffer.percent</name><value>0.0</value><source>mapred-default.xml</source></property>
97+<property><name>mapreduce.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value><source>mapred-default.xml</source></property>
98+<property><name>mapreduce.map.skip.proc.count.autoincr</name><value>true</value><source>mapred-default.xml</source></property>
99+<property><name>mapreduce.jobtracker.address</name><value>local</value><source>mapred-default.xml</source></property>
100+<property><name>mapreduce.cluster.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value><source>mapred-default.xml</source></property>
101+<property><name>mapreduce.tasktracker.taskcontroller</name><value>org.apache.hadoop.mapred.DefaultTaskController</value><source>mapred-default.xml</source></property>
102+<property><name>mapreduce.reduce.shuffle.parallelcopies</name><value>5</value><source>mapred-default.xml</source></property>
103+<property><name>yarn.nodemanager.env-whitelist</name><value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value><source>yarn-default.xml</source></property>
104+<property><name>mapreduce.jobtracker.heartbeats.in.second</name><value>100</value><source>mapred-default.xml</source></property>
105+<property><name>mapreduce.job.maxtaskfailures.per.tracker</name><value>3</value><source>mapred-default.xml</source></property>
106+<property><name>ipc.client.connection.maxidletime</name><value>10000</value><source>core-default.xml</source></property>
107+<property><name>mapreduce.shuffle.ssl.enabled</name><value>false</value><source>mapred-default.xml</source></property>
108+<property><name>fs.s3.sleepTimeSeconds</name><value>10</value><source>core-default.xml</source></property>
109+<property><name>yarn.scheduler.maximum-allocation-vcores</name><value>32</value><source>yarn-default.xml</source></property>
110+<property><name>rpc.engine.org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB</name><value>org.apache.hadoop.ipc.ProtobufRpcEngine</value><source>programatically</source></property>
111+<property><name>hadoop.ssl.server.conf</name><value>ssl-server.xml</value><source>core-default.xml</source></property>
112+<property><name>ha.zookeeper.parent-znode</name><value>/hadoop-ha</value><source>core-default.xml</source></property>
113+<property><name>io.seqfile.lazydecompress</name><value>true</value><source>core-default.xml</source></property>
114+<property><name>yarn.client.app-submission.poll-interval</name><value>1000</value><source>yarn-default.xml</source></property>
115+<property><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value><source>mapred-default.xml</source></property>
116+<property><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value><source>mapred-default.xml</source></property>
117+<property><name>ipc.client.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
118+<property><name>yarn.resourcemanager.cluster.id</name><value>yarn-rm-cluster</value><source>yarn-default.xml</source></property>
119+<property><name>mapreduce.jobtracker.tasktracker.maxblacklists</name><value>4</value><source>mapred-default.xml</source></property>
120+<property><name>s3.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
121+<property><name>nfs3.server.port</name><value>2049</value><source>core-default.xml</source></property>
122+<property><name>mapreduce.job.jvm.numtasks</name><value>1</value><source>mapred-default.xml</source></property>
123+<property><name>mapreduce.task.io.sort.mb</name><value>100</value><source>mapred-default.xml</source></property>
124+<property><name>yarn.resourcemanager.ha.enabled</name><value>false</value><source>yarn-default.xml</source></property>
125+<property><name>io.file.buffer.size</name><value>4096</value><source>core-default.xml</source></property>
126+<property><name>yarn.nodemanager.admin-env</name><value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value><source>yarn-default.xml</source></property>
127+<property><name>mapreduce.job.split.metainfo.maxsize</name><value>10000000</value><source>mapred-default.xml</source></property>
128+<property><name>yarn.resourcemanager.scheduler.monitor.enable</name><value>false</value><source>yarn-default.xml</source></property>
129+<property><name>yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms</name><value>1000</value><source>mapred-default.xml</source></property>
130+<property><name>mapreduce.reduce.maxattempts</name><value>4</value><source>mapred-default.xml</source></property>
131+<property><name>hadoop.security.authentication</name><value>simple</value><source>core-default.xml</source></property>
132+<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value><source>core-default.xml</source></property>
133+<property><name>mapreduce.jobtracker.taskscheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value><source>mapred-default.xml</source></property>
134+<property><name>yarn.app.mapreduce.am.job.task.listener.thread-count</name><value>30</value><source>mapred-default.xml</source></property>
135+<property><name>mapreduce.job.reduces</name><value>1</value><source>mapred-default.xml</source></property>
136+<property><name>mapreduce.map.sort.spill.percent</name><value>0.80</value><source>mapred-default.xml</source></property>
137+<property><name>mapreduce.job.end-notification.retry.interval</name><value>1000</value><source>mapred-default.xml</source></property>
138+<property><name>mapreduce.jobhistory.minicluster.fixed.ports</name><value>false</value><source>mapred-default.xml</source></property>
139+<property><name>mapreduce.job.maps</name><value>2</value><source>mapred-default.xml</source></property>
140+<property><name>mapreduce.job.speculative.slownodethreshold</name><value>1.0</value><source>mapred-default.xml</source></property>
141+<property><name>tfile.fs.input.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
142+<property><name>mapreduce.map.speculative</name><value>true</value><source>mapred-default.xml</source></property>
143+<property><name>mapreduce.job.acl-view-job</name><value> </value><source>mapred-default.xml</source></property>
144+<property><name>yarn.resourcemanager.zk.state-store.num-retries</name><value>3</value><source>yarn-default.xml</source></property>
145+<property><name>mapreduce.reduce.shuffle.retry-delay.max.ms</name><value>60000</value><source>mapred-default.xml</source></property>
146+<property><name>rpc.engine.org.apache.hadoop.yarn.api.ApplicationClientProtocolPB</name><value>org.apache.hadoop.ipc.ProtobufRpcEngine</value><source>programatically</source></property>
147+<property><name>mapreduce.job.end-notification.max.retry.interval</name><value>5000</value><source>mapred-default.xml</source></property>
148+<property><name>yarn.ipc.serializer.type</name><value>protocolbuffers</value><source>yarn-default.xml</source></property>
149+<property><name>mapreduce.tasktracker.http.threads</name><value>40</value><source>mapred-default.xml</source></property>
150+<property><name>ftp.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
151+<property><name>ha.failover-controller.cli-check.rpc-timeout.ms</name><value>20000</value><source>core-default.xml</source></property>
152+<property><name>mapreduce.job.token.tracking.ids.enabled</name><value>false</value><source>mapred-default.xml</source></property>
153+<property><name>yarn.nodemanager.resourcemanager.connect.wait.secs</name><value>900</value><source>yarn-default.xml</source></property>
154+<property><name>mapreduce.task.skip.start.attempts</name><value>2</value><source>mapred-default.xml</source></property>
155+<property><name>mapreduce.jobtracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value><source>mapred-default.xml</source></property>
156+<property><name>ipc.client.kill.max</name><value>10</value><source>core-default.xml</source></property>
157+<property><name>yarn.nodemanager.linux-container-executor.cgroups.mount</name><value>false</value><source>yarn-default.xml</source></property>
158+<property><name>mapreduce.jobhistory.keytab</name><value>/etc/security/keytab/jhs.service.keytab</value><source>mapred-default.xml</source></property>
159+<property><name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name><value>/hadoop-yarn</value><source>yarn-default.xml</source></property>
160+<property><name>mapreduce.job.end-notification.max.attempts</name><value>5</value><source>mapred-default.xml</source></property>
161+<property><name>mapreduce.jobhistory.max-age-ms</name><value>604800000</value><source>mapred-default.xml</source></property>
162+<property><name>yarn.http.policy</name><value>HTTP_ONLY</value><source>yarn-default.xml</source></property>
163+<property><name>mapreduce.task.tmp.dir</name><value>./tmp</value><source>mapred-default.xml</source></property>
164+<property><name>hadoop.http.filter.initializers</name><value>org.apache.hadoop.http.lib.StaticUserWebFilter</value><source>core-default.xml</source></property>
165+<property><name>hadoop.http.authentication.type</name><value>simple</value><source>core-default.xml</source></property>
166+<property><name>yarn.resourcemanager.client.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
167+<property><name>ipc.server.listen.queue.size</name><value>128</value><source>core-default.xml</source></property>
168+<property><name>mapreduce.reduce.skip.maxgroups</name><value>0</value><source>mapred-default.xml</source></property>
169+<property><name>file.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
170+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value><source>yarn-default.xml</source></property>
171+<property><name>io.mapfile.bloom.size</name><value>1048576</value><source>core-default.xml</source></property>
172+<property><name>yarn.nodemanager.container-executor.class</name><value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value><source>yarn-default.xml</source></property>
173+<property><name>mapreduce.map.maxattempts</name><value>4</value><source>mapred-default.xml</source></property>
174+<property><name>mapreduce.jobtracker.jobhistory.block.size</name><value>3145728</value><source>mapred-default.xml</source></property>
175+<property><name>yarn.log-aggregation.retain-seconds</name><value>-1</value><source>yarn-default.xml</source></property>
176+<property><name>yarn.app.mapreduce.am.job.committer.cancel-timeout</name><value>60000</value><source>mapred-default.xml</source></property>
177+<property><name>ftp.replication</name><value>3</value><source>core-default.xml</source></property>
178+<property><name>mapreduce.jobtracker.http.address</name><value>0.0.0.0:50030</value><source>mapred-default.xml</source></property>
179+<property><name>mapreduce.jobhistory.intermediate-done-dir</name><value>${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate</value><source>mapred-default.xml</source></property>
180+<property><name>yarn.nodemanager.health-checker.script.timeout-ms</name><value>1200000</value><source>yarn-default.xml</source></property>
181+<property><name>mapreduce.jobhistory.address</name><value>0.0.0.0:10020</value><source>mapred-default.xml</source></property>
182+<property><name>mapreduce.jobtracker.taskcache.levels</name><value>2</value><source>mapred-default.xml</source></property>
183+<property><name>yarn.nodemanager.log.retain-seconds</name><value>10800</value><source>yarn-default.xml</source></property>
184+<property><name>yarn.nodemanager.local-cache.max-files-per-directory</name><value>8192</value><source>yarn-default.xml</source></property>
185+<property><name>mapred.child.java.opts</name><value>-Xmx200m</value><source>mapred-default.xml</source></property>
186+<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value><source>mapred-default.xml</source></property>
187+<property><name>hadoop.util.hash.type</name><value>murmur</value><source>core-default.xml</source></property>
188+<property><name>mapreduce.jobhistory.move.interval-ms</name><value>180000</value><source>mapred-default.xml</source></property>
189+<property><name>mapreduce.reduce.skip.proc.count.autoincr</name><value>true</value><source>mapred-default.xml</source></property>
190+<property><name>yarn.nodemanager.container-monitor.interval-ms</name><value>3000</value><source>yarn-default.xml</source></property>
191+<property><name>yarn.client.nodemanager-client-async.thread-pool-max-size</name><value>500</value><source>yarn-default.xml</source></property>
192+<property><name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name><value>0.25</value><source>yarn-default.xml</source></property>
193+<property><name>mapreduce.jobhistory.http.policy</name><value>HTTP_ONLY</value><source>mapred-default.xml</source></property>
194+<property><name>ha.zookeeper.acl</name><value>world:anyone:rwcda</value><source>core-default.xml</source></property>
195+<property><name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name><value>250</value><source>yarn-default.xml</source></property>
196+<property><name>io.map.index.skip</name><value>0</value><source>core-default.xml</source></property>
197+<property><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value><source>core-default.xml</source></property>
198+<property><name>fs.s3.maxRetries</name><value>4</value><source>core-default.xml</source></property>
199+<property><name>ha.failover-controller.new-active.rpc-timeout.ms</name><value>60000</value><source>core-default.xml</source></property>
200+<property><name>s3native.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
201+<property><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
202+<property><name>hadoop.http.staticuser.user</name><value>dr.who</value><source>core-default.xml</source></property>
203+<property><name>mapreduce.reduce.speculative</name><value>true</value><source>mapred-default.xml</source></property>
204+<property><name>mapreduce.client.output.filter</name><value>FAILED</value><source>mapred-default.xml</source></property>
205+<property><name>mapreduce.jobhistory.datestring.cache.size</name><value>200000</value><source>mapred-default.xml</source></property>
206+<property><name>mapreduce.ifile.readahead.bytes</name><value>4194304</value><source>mapred-default.xml</source></property>
207+<property><name>mapreduce.tasktracker.report.address</name><value>127.0.0.1:0</value><source>mapred-default.xml</source></property>
208+<property><name>mapreduce.task.userlog.limit.kb</name><value>0</value><source>mapred-default.xml</source></property>
209+<property><name>mapreduce.tasktracker.map.tasks.maximum</name><value>2</value><source>mapred-default.xml</source></property>
210+<property><name>hadoop.http.authentication.simple.anonymous.allowed</name><value>true</value><source>core-default.xml</source></property>
211+<property><name>mapreduce.job.classloader.system.classes</name><value>java.,javax.,org.apache.commons.logging.,org.apache.log4j.,org.apache.hadoop.</value><source>mapred-default.xml</source></property>
212+<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value><source>core-default.xml</source></property>
213+<property><name>yarn.nodemanager.resourcemanager.connect.retry_interval.secs</name><value>30</value><source>yarn-default.xml</source></property>
214+<property><name>yarn.resourcemanager.connect.max-wait.ms</name><value>900000</value><source>yarn-default.xml</source></property>
215+<property><name>fs.automatic.close</name><value>true</value><source>core-default.xml</source></property>
216+<property><name>mapreduce.tasktracker.healthchecker.script.timeout</name><value>600000</value><source>mapred-default.xml</source></property>
217+<property><name>yarn.resourcemanager.address</name><value>localhost:8032</value><source>programatically</source></property>
218+<property><name>yarn.resourcemanager.zk.state-store.parent-path</name><value>/rmstore</value><source>yarn-default.xml</source></property>
219+<property><name>yarn.nodemanager.health-checker.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
220+<property><name>yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs</name><value>86400</value><source>yarn-default.xml</source></property>
221+<property><name>mapreduce.reduce.markreset.buffer.percent</name><value>0.0</value><source>mapred-default.xml</source></property>
222+<property><name>hadoop.security.group.mapping.ldap.directory.search.timeout</name><value>10000</value><source>core-default.xml</source></property>
223+<property><name>mapreduce.map.log.level</name><value>INFO</value><source>mapred-default.xml</source></property>
224+<property><name>yarn.nodemanager.localizer.address</name><value>${yarn.nodemanager.hostname}:8040</value><source>yarn-default.xml</source></property>
225+<property><name>ftp.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
226+<property><name>yarn.resourcemanager.keytab</name><value>/etc/krb5.keytab</value><source>yarn-default.xml</source></property>
227+<property><name>ha.health-monitor.rpc-timeout.ms</name><value>45000</value><source>core-default.xml</source></property>
228+<property><name>hadoop.security.group.mapping.ldap.search.attr.member</name><value>member</value><source>core-default.xml</source></property>
229+<property><name>mapreduce.job.classloader</name><value>false</value><source>mapred-default.xml</source></property>
230+<property><name>yarn.resourcemanager.ha.automatic-failover.controller.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.RMFailoverControllerZKImpl</value><source>yarn-default.xml</source></property>
231+<property><name>yarn.nm.liveness-monitor.expiry-interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
232+<property><name>io.compression.codec.bzip2.library</name><value>system-native</value><source>core-default.xml</source></property>
233+<property><name>hadoop.http.authentication.token.validity</name><value>36000</value><source>core-default.xml</source></property>
234+<property><name>mapreduce.job.hdfs-servers</name><value>${fs.defaultFS}</value><source>yarn-default.xml</source></property>
235+<property><name>s3native.replication</name><value>3</value><source>core-default.xml</source></property>
236+<property><name>rpc.engine.org.apache.hadoop.ipc.ProtocolMetaInfoPB</name><value>org.apache.hadoop.ipc.ProtobufRpcEngine</value><source>programatically</source></property>
237+<property><name>yarn.nodemanager.localizer.client.thread-count</name><value>5</value><source>yarn-default.xml</source></property>
238+<property><name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
239+<property><name>dfs.ha.fencing.ssh.connect-timeout</name><value>30000</value><source>core-default.xml</source></property>
240+<property><name>yarn.am.liveness-monitor.expiry-interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
241+<property><name>net.topology.impl</name><value>org.apache.hadoop.net.NetworkTopology</value><source>core-default.xml</source></property>
242+<property><name>mapreduce.task.profile</name><value>false</value><source>mapred-default.xml</source></property>
243+<property><name>mapreduce.tasktracker.http.address</name><value>0.0.0.0:50060</value><source>mapred-default.xml</source></property>
244+<property><name>yarn.nodemanager.linux-container-executor.resources-handler.class</name><value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value><source>yarn-default.xml</source></property>
245+<property><name>mapreduce.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value><source>mapred-default.xml</source></property>
246+<property><name>mapreduce.jobhistory.webapp.address</name><value>0.0.0.0:19888</value><source>mapred-default.xml</source></property>
247+<property><name>yarn.ipc.rpc.class</name><value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value><source>yarn-default.xml</source></property>
248+<property><name>ha.failover-controller.graceful-fence.rpc-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
249+<property><name>yarn.resourcemanager.zk.state-store.timeout.ms</name><value>60000</value><source>yarn-default.xml</source></property>
250+<property><name>yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs</name><value>86400</value><source>yarn-default.xml</source></property>
251+<property><name>yarn.resourcemanager.am.max-attempts</name><value>2</value><source>yarn-default.xml</source></property>
252+<property><name>mapreduce.job.ubertask.maxmaps</name><value>9</value><source>mapred-default.xml</source></property>
253+<property><name>yarn.scheduler.maximum-allocation-mb</name><value>8192</value><source>yarn-default.xml</source></property>
254+<property><name>yarn.resourcemanager.webapp.https.address</name><value>${yarn.resourcemanager.hostname}:8090</value><source>yarn-default.xml</source></property>
255+<property><name>mapreduce.job.userlog.retain.hours</name><value>24</value><source>mapred-default.xml</source></property>
256+<property><name>yarn.nodemanager.linux-container-executor.nonsecure-mode.user-pattern</name><value>^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$</value><source>yarn-default.xml</source></property>
257+<property><name>mapreduce.task.timeout</name><value>600000</value><source>mapred-default.xml</source></property>
258+<property><name>mapreduce.jobhistory.loadedjobs.cache.size</name><value>5</value><source>mapred-default.xml</source></property>
259+<property><name>mapreduce.framework.name</name><value>yarn</value><source>mapred-site.xml</source></property>
260+<property><name>ipc.client.idlethreshold</name><value>4000</value><source>core-default.xml</source></property>
261+<property><name>ipc.server.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
262+<property><name>ftp.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
263+<property><name>yarn.resourcemanager.hostname</name><value>0.0.0.0</value><source>yarn-default.xml</source></property>
264+<property><name>s3.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
265+<property><name>mapreduce.job.speculative.slowtaskthreshold</name><value>1.0</value><source>mapred-default.xml</source></property>
266+<property><name>yarn.nodemanager.localizer.cache.target-size-mb</name><value>10240</value><source>yarn-default.xml</source></property>
267+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/var/log/hadoop-yarn/apps</value><source>yarn-site.xml</source></property>
268+<property><name>fs.s3.block.size</name><value>67108864</value><source>core-default.xml</source></property>
269+<property><name>mapreduce.job.queuename</name><value>default</value><source>mapred-default.xml</source></property>
270+<property><name>yarn.scheduler.minimum-allocation-mb</name><value>1024</value><source>yarn-default.xml</source></property>
271+<property><name>hadoop.rpc.protection</name><value>authentication</value><source>core-default.xml</source></property>
272+<property><name>yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user</name><value>nobody</value><source>yarn-default.xml</source></property>
273+<property><name>yarn.app.mapreduce.client-am.ipc.max-retries</name><value>3</value><source>mapred-default.xml</source></property>
274+<property><name>yarn.resourcemanager.ha.automatic-failover.port</name><value>8035</value><source>yarn-default.xml</source></property>
275+<property><name>ftp.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
276+<property><name>yarn.nodemanager.address</name><value>${yarn.nodemanager.hostname}:0</value><source>yarn-default.xml</source></property>
277+<property><name>fs.defaultFS</name><value>hdfs://localhost:9000</value><source>core-site.xml</source></property>
278+<property><name>mapreduce.task.merge.progress.records</name><value>10000</value><source>mapred-default.xml</source></property>
279+<property><name>yarn.resourcemanager.scheduler.client.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
280+<property><name>file.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
281+<property><name>mapreduce.reduce.cpu.vcores</name><value>1</value><source>mapred-default.xml</source></property>
282+<property><name>yarn.nodemanager.delete.thread-count</name><value>4</value><source>yarn-default.xml</source></property>
283+<property><name>yarn.resourcemanager.scheduler.address</name><value>localhost:8030</value><source>programatically</source></property>
284+<property><name>fs.trash.checkpoint.interval</name><value>0</value><source>core-default.xml</source></property>
285+<property><name>hadoop.http.authentication.signature.secret.file</name><value>${user.home}/hadoop-http-auth-signature-secret</value><source>core-default.xml</source></property>
286+<property><name>s3native.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
287+<property><name>mapreduce.reduce.shuffle.read.timeout</name><value>180000</value><source>mapred-default.xml</source></property>
288+<property><name>yarn.app.mapreduce.am.command-opts</name><value>-Xmx1024m</value><source>mapred-default.xml</source></property>
289+<property><name>mapreduce.admin.user.env</name><value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native</value><source>mapred-default.xml</source></property>
290+<property><name>mapreduce.local.clientfactory.class.name</name><value>org.apache.hadoop.mapred.LocalClientFactory</value><source>mapred-default.xml</source></property>
291+<property><name>fs.permissions.umask-mode</name><value>022</value><source>core-default.xml</source></property>
292+<property><name>mapreduce.jobhistory.move.thread-count</name><value>3</value><source>mapred-default.xml</source></property>
293+<property><name>hadoop.common.configuration.version</name><value>0.23.0</value><source>core-default.xml</source></property>
294+<property><name>mapreduce.tasktracker.dns.interface</name><value>default</value><source>mapred-default.xml</source></property>
295+<property><name>mapreduce.output.fileoutputformat.compress.type</name><value>RECORD</value><source>mapred-default.xml</source></property>
296+<property><name>yarn.resourcemanager.connect.retry-interval.ms</name><value>30000</value><source>yarn-default.xml</source></property>
297+<property><name>mapreduce.ifile.readahead</name><value>true</value><source>mapred-default.xml</source></property>
298+<property><name>hadoop.security.group.mapping.ldap.ssl</name><value>false</value><source>core-default.xml</source></property>
299+<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value><source>core-default.xml</source></property>
300+<property><name>fs.df.interval</name><value>60000</value><source>core-default.xml</source></property>
301+<property><name>mapreduce.reduce.shuffle.input.buffer.percent</name><value>0.70</value><source>mapred-default.xml</source></property>
302+<property><name>io.seqfile.compress.blocksize</name><value>1000000</value><source>core-default.xml</source></property>
303+<property><name>ipc.client.connect.max.retries</name><value>10</value><source>core-default.xml</source></property>
304+<property><name>hadoop.security.groups.cache.secs</name><value>300</value><source>core-default.xml</source></property>
305+<property><name>yarn.nodemanager.process-kill-wait.ms</name><value>2000</value><source>yarn-default.xml</source></property>
306+<property><name>yarn.nodemanager.vmem-check-enabled</name><value>true</value><source>yarn-default.xml</source></property>
307+<property><name>yarn.application.classpath</name><value>
308+ $HADOOP_CONF_DIR,
309+ $HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
310+ $HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
311+ $HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
312+ $HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*
313+ </value><source>yarn-site.xml</source></property>
314+<property><name>yarn.app.mapreduce.client.max-retries</name><value>3</value><source>mapred-default.xml</source></property>
315+<property><name>yarn.nodemanager.log-aggregation.compression-type</name><value>none</value><source>yarn-default.xml</source></property>
316+<property><name>hadoop.security.group.mapping.ldap.search.filter.user</name><value>(&amp;(objectClass=user)(sAMAccountName={0}))</value><source>core-default.xml</source></property>
317+<property><name>yarn.nodemanager.localizer.cache.cleanup.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
318+<property><name>yarn.nodemanager.log-dirs</name><value>/var/log/hadoop-yarn/containers</value><source>yarn-site.xml</source></property>
319+<property><name>fs.s3n.block.size</name><value>67108864</value><source>core-default.xml</source></property>
320+<property><name>fs.ftp.host</name><value>0.0.0.0</value><source>core-default.xml</source></property>
321+<property><name>hadoop.security.group.mapping</name><value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value><source>core-default.xml</source></property>
322+<property><name>yarn.app.mapreduce.am.resource.cpu-vcores</name><value>1</value><source>mapred-default.xml</source></property>
323+<property><name>mapreduce.jobhistory.cleaner.enable</name><value>true</value><source>mapred-default.xml</source></property>
324+<property><name>mapreduce.map.skip.maxrecords</name><value>0</value><source>mapred-default.xml</source></property>
325+<property><name>yarn.scheduler.minimum-allocation-vcores</name><value>1</value><source>yarn-default.xml</source></property>
326+<property><name>file.replication</name><value>1</value><source>core-default.xml</source></property>
327+<property><name>yarn.resourcemanager.resource-tracker.address</name><value>localhost:8031</value><source>programatically</source></property>
328+<property><name>mapreduce.jobtracker.restart.recover</name><value>false</value><source>mapred-default.xml</source></property>
329+<property><name>hadoop.work.around.non.threadsafe.getpwuid</name><value>false</value><source>core-default.xml</source></property>
330+<property><name>mapreduce.tasktracker.indexcache.mb</name><value>10</value><source>mapred-default.xml</source></property>
331+<property><name>mapreduce.output.fileoutputformat.compress</name><value>false</value><source>mapred-default.xml</source></property>
332+<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value><source>core-default.xml</source></property>
333+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value><source>yarn-default.xml</source></property>
334+<property><name>hadoop.kerberos.kinit.command</name><value>kinit</value><source>core-default.xml</source></property>
335+<property><name>mapreduce.job.committer.setup.cleanup.needed</name><value>true</value><source>mapred-default.xml</source></property>
336+<property><name>mapreduce.task.profile.reduces</name><value>0-2</value><source>mapred-default.xml</source></property>
337+<property><name>file.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
338+<property><name>mapreduce.jobtracker.handler.count</name><value>10</value><source>mapred-default.xml</source></property>
339+<property><name>yarn.app.mapreduce.am.job.committer.commit-window</name><value>10000</value><source>mapred-default.xml</source></property>
340+<property><name>yarn.resourcemanager.ha.admin.address</name><value>${yarn.resourcemanager.hostname}:8034</value><source>yarn-default.xml</source></property>
341+<property><name>yarn.dispatcher.exit-on-error</name><value>true</value><source>programatically</source></property>
342+<property><name>net.topology.script.number.args</name><value>100</value><source>core-default.xml</source></property>
343+<property><name>mapreduce.task.profile.maps</name><value>0-2</value><source>mapred-default.xml</source></property>
344+<property><name>yarn.resourcemanager.webapp.address</name><value>${yarn.resourcemanager.hostname}:8088</value><source>yarn-default.xml</source></property>
345+<property><name>mapreduce.jobtracker.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value><source>mapred-default.xml</source></property>
346+<property><name>hadoop.ssl.hostname.verifier</name><value>DEFAULT</value><source>core-default.xml</source></property>
347+<property><name>yarn.nodemanager.vmem-pmem-ratio</name><value>2.1</value><source>yarn-default.xml</source></property>
348+<property><name>yarn.nodemanager.hostname</name><value>0.0.0.0</value><source>yarn-default.xml</source></property>
349+<property><name>ipc.client.connect.timeout</name><value>20000</value><source>core-default.xml</source></property>
350+<property><name>mapreduce.jobhistory.principal</name><value>jhs/_HOST@REALM.TLD</value><source>mapred-default.xml</source></property>
351+<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value><source>core-default.xml</source></property>
352+<property><name>mapreduce.shuffle.ssl.file.buffer.size</name><value>65536</value><source>mapred-default.xml</source></property>
353+<property><name>mapreduce.jobtracker.expire.trackers.interval</name><value>600000</value><source>mapred-default.xml</source></property>
354+<property><name>mapreduce.cluster.acls.enabled</name><value>false</value><source>mapred-default.xml</source></property>
355+<property><name>yarn.nodemanager.remote-app-log-dir-suffix</name><value>logs</value><source>yarn-default.xml</source></property>
356+<property><name>ha.failover-controller.graceful-fence.connection.retries</name><value>1</value><source>core-default.xml</source></property>
357+<property><name>ha.health-monitor.connect-retry-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
358+<property><name>yarn.resourcemanager.zk.state-store.acl</name><value>world:anyone:rwcda</value><source>yarn-default.xml</source></property>
359+<property><name>yarn.app.mapreduce.am.resource.mb</name><value>1536</value><source>mapred-default.xml</source></property>
360+<property><name>io.seqfile.local.dir</name><value>${hadoop.tmp.dir}/io/local</value><source>core-default.xml</source></property>
361+<property><name>mapreduce.reduce.shuffle.merge.percent</name><value>0.66</value><source>mapred-default.xml</source></property>
362+<property><name>tfile.io.chunk.size</name><value>1048576</value><source>core-default.xml</source></property>
363+<property><name>file.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
364+<property><name>mapreduce.jobtracker.jobhistory.lru.cache.size</name><value>5</value><source>mapred-default.xml</source></property>
365+<property><name>mapreduce.jobtracker.maxtasks.perjob</name><value>-1</value><source>mapred-default.xml</source></property>
366+<property><name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
367+<property><name>mapreduce.job.acl-modify-job</name><value> </value><source>mapred-default.xml</source></property>
368+<property><name>yarn.nodemanager.webapp.address</name><value>${yarn.nodemanager.hostname}:8042</value><source>yarn-default.xml</source></property>
369+<property><name>mapreduce.am.max-attempts</name><value>2</value><source>mapred-default.xml</source></property>
370+<property><name>mapreduce.tasktracker.reduce.tasks.maximum</name><value>2</value><source>mapred-default.xml</source></property>
371+<property><name>mapreduce.cluster.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value><source>mapred-default.xml</source></property>
372+<property><name>io.skip.checksum.errors</name><value>false</value><source>core-default.xml</source></property>
373+<property><name>mapreduce.jobhistory.joblist.cache.size</name><value>20000</value><source>mapred-default.xml</source></property>
374+<property><name>yarn.app.mapreduce.am.staging-dir</name><value>/tmp/hadoop-yarn/staging</value><source>mapred-default.xml</source></property>
375+<property><name>fs.ftp.host.port</name><value>21</value><source>core-default.xml</source></property>
376+<property><name>yarn.resourcemanager.admin.client.thread-count</name><value>1</value><source>yarn-default.xml</source></property>
377+<property><name>fs.AbstractFileSystem.viewfs.impl</name><value>org.apache.hadoop.fs.viewfs.ViewFs</value><source>core-default.xml</source></property>
378+<property><name>yarn.resourcemanager.resource-tracker.client.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
379+<property><name>mapreduce.tasktracker.dns.nameserver</name><value>default</value><source>mapred-default.xml</source></property>
380+<property><name>ipc.client.fallback-to-simple-auth-allowed</name><value>false</value><source>core-default.xml</source></property>
381+<property><name>mapreduce.map.output.compress</name><value>false</value><source>mapred-default.xml</source></property>
382+<property><name>yarn.nodemanager.delete.debug-delay-sec</name><value>0</value><source>yarn-default.xml</source></property>
383+<property><name>hadoop.ssl.require.client.cert</name><value>false</value><source>core-default.xml</source></property>
384+</configuration>
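
The second file in this commit, default.tsv (diff below), flattens the same properties into a sorted name/value listing with a tab-separated header row. The commit does not record how the TSV was produced; a minimal sketch of deriving it from the XML dump above, assuming standard JAXP parsing, would be:

import java.io.PrintWriter;
import java.util.Map;
import java.util.TreeMap;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

// Illustrative sketch: flatten a <property> XML dump into the sorted
// "name<TAB>value" listing used by default.tsv.
public class XmlToTsv {
  public static void main(String[] args) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance()
        .newDocumentBuilder().parse("default.xml");
    Map<String, String> props = new TreeMap<>();          // sorted by property name
    NodeList nodes = doc.getElementsByTagName("property");
    for (int i = 0; i < nodes.getLength(); i++) {
      Element p = (Element) nodes.item(i);
      String name  = p.getElementsByTagName("name").item(0).getTextContent();
      String value = p.getElementsByTagName("value").item(0).getTextContent();
      props.put(name, value);
    }
    try (PrintWriter out = new PrintWriter("default.tsv")) {
      out.println("name\tvalue");                         // header row, as in the file below
      for (Map.Entry<String, String> e : props.entrySet()) {
        out.println(e.getKey() + "\t" + e.getValue());
      }
    }
  }
}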
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/default.tsv (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/default.tsv (revision 579)
@@ -0,0 +1,375 @@
1+name value
2+dfs.ha.fencing.ssh.connect-timeout 30000
3+file.blocksize 67108864
4+file.bytes-per-checksum 512
5+file.client-write-packet-size 65536
6+file.replication 1
7+file.stream-buffer-size 4096
8+fs.AbstractFileSystem.file.impl org.apache.hadoop.fs.local.LocalFs
9+fs.AbstractFileSystem.hdfs.impl org.apache.hadoop.fs.Hdfs
10+fs.AbstractFileSystem.viewfs.impl org.apache.hadoop.fs.viewfs.ViewFs
11+fs.automatic.close true
12+fs.client.resolve.remote.symlinks true
13+fs.defaultFS hdfs://localhost:9000
14+fs.df.interval 60000
15+fs.ftp.host 0.0.0.0
16+fs.ftp.host.port 21
17+fs.permissions.umask-mode 22
18+fs.s3.block.size 67108864
19+fs.s3.buffer.dir ${hadoop.tmp.dir}/s3
20+fs.s3.maxRetries 4
21+fs.s3.sleepTimeSeconds 10
22+fs.s3n.block.size 67108864
23+fs.trash.checkpoint.interval 0
24+fs.trash.interval 0
25+ftp.blocksize 67108864
26+ftp.bytes-per-checksum 512
27+ftp.client-write-packet-size 65536
28+ftp.replication 3
29+ftp.stream-buffer-size 4096
30+ha.failover-controller.cli-check.rpc-timeout.ms 20000
31+ha.failover-controller.graceful-fence.connection.retries 1
32+ha.failover-controller.graceful-fence.rpc-timeout.ms 5000
33+ha.failover-controller.new-active.rpc-timeout.ms 60000
34+ha.health-monitor.check-interval.ms 1000
35+ha.health-monitor.connect-retry-interval.ms 1000
36+ha.health-monitor.rpc-timeout.ms 45000
37+ha.health-monitor.sleep-after-disconnect.ms 1000
38+ha.zookeeper.acl world:anyone:rwcda
39+ha.zookeeper.parent-znode /hadoop-ha
40+ha.zookeeper.session-timeout.ms 5000
41+hadoop.common.configuration.version 0.23.0
42+hadoop.http.authentication.kerberos.keytab ${user.home}/hadoop.keytab
43+hadoop.http.authentication.kerberos.principal HTTP/_HOST@LOCALHOST
44+hadoop.http.authentication.signature.secret.file ${user.home}/hadoop-http-auth-signature-secret
45+hadoop.http.authentication.simple.anonymous.allowed true
46+hadoop.http.authentication.token.validity 36000
47+hadoop.http.authentication.type simple
48+hadoop.http.filter.initializers org.apache.hadoop.http.lib.StaticUserWebFilter
49+hadoop.http.staticuser.user dr.who
50+hadoop.jetty.logs.serve.aliases true
51+hadoop.kerberos.kinit.command kinit
52+hadoop.rpc.protection authentication
53+hadoop.rpc.socket.factory.class.default org.apache.hadoop.net.StandardSocketFactory
54+hadoop.security.authentication simple
55+hadoop.security.authorization false
56+hadoop.security.group.mapping org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback
57+hadoop.security.group.mapping.ldap.directory.search.timeout 10000
58+hadoop.security.group.mapping.ldap.search.attr.group.name cn
59+hadoop.security.group.mapping.ldap.search.attr.member member
60+hadoop.security.group.mapping.ldap.search.filter.group (objectClass=group)
61+hadoop.security.group.mapping.ldap.search.filter.user (&(objectClass=user)(sAMAccountName={0}))
62+hadoop.security.group.mapping.ldap.ssl false
63+hadoop.security.groups.cache.secs 300
64+hadoop.security.instrumentation.requires.admin false
65+hadoop.security.uid.cache.secs 14400
66+hadoop.ssl.client.conf ssl-client.xml
67+hadoop.ssl.enabled false
68+hadoop.ssl.hostname.verifier DEFAULT
69+hadoop.ssl.keystores.factory.class org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory
70+hadoop.ssl.require.client.cert false
71+hadoop.ssl.server.conf ssl-server.xml
72+hadoop.tmp.dir /tmp/hadoop-${user.name}
73+hadoop.util.hash.type murmur
74+hadoop.work.around.non.threadsafe.getpwuid false
75+io.bytes.per.checksum 512
76+io.compression.codec.bzip2.library system-native
77+io.file.buffer.size 4096
78+io.map.index.interval 128
79+io.map.index.skip 0
80+io.mapfile.bloom.error.rate 0.005
81+io.mapfile.bloom.size 1048576
82+io.native.lib.available true
83+io.seqfile.compress.blocksize 1000000
84+io.seqfile.lazydecompress true
85+io.seqfile.local.dir ${hadoop.tmp.dir}/io/local
86+io.seqfile.sorter.recordlimit 1000000
87+io.serializations org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization
88+io.skip.checksum.errors false
89+ipc.client.connect.max.retries 10
90+ipc.client.connect.max.retries.on.timeouts 45
91+ipc.client.connect.timeout 20000
92+ipc.client.connection.maxidletime 10000
93+ipc.client.fallback-to-simple-auth-allowed false
94+ipc.client.idlethreshold 4000
95+ipc.client.kill.max 10
96+ipc.client.tcpnodelay false
97+ipc.server.listen.queue.size 128
98+ipc.server.tcpnodelay false
99+map.sort.class org.apache.hadoop.util.QuickSort
100+mapred.child.java.opts -Xmx200m
101+mapreduce.admin.user.env LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native
102+mapreduce.am.max-attempts 2
103+mapreduce.client.completion.pollinterval 5000
104+mapreduce.client.output.filter FAILED
105+mapreduce.client.progressmonitor.pollinterval 1000
106+mapreduce.client.submit.file.replication 10
107+mapreduce.cluster.acls.enabled false
108+mapreduce.cluster.local.dir ${hadoop.tmp.dir}/mapred/local
109+mapreduce.cluster.temp.dir ${hadoop.tmp.dir}/mapred/temp
110+mapreduce.framework.name yarn
111+mapreduce.ifile.readahead true
112+mapreduce.ifile.readahead.bytes 4194304
113+mapreduce.input.fileinputformat.split.minsize 0
114+mapreduce.job.acl-modify-job
115+mapreduce.job.acl-view-job
116+mapreduce.job.classloader false
117+mapreduce.job.classloader.system.classes java.,javax.,org.apache.commons.logging.,org.apache.log4j.,org.apache.hadoop.
118+mapreduce.job.committer.setup.cleanup.needed true
119+mapreduce.job.complete.cancel.delegation.tokens true
120+mapreduce.job.counters.max 120
121+mapreduce.job.end-notification.max.attempts 5
122+mapreduce.job.end-notification.max.retry.interval 5000
123+mapreduce.job.end-notification.retry.attempts 0
124+mapreduce.job.end-notification.retry.interval 1000
125+mapreduce.job.hdfs-servers ${fs.defaultFS}
126+mapreduce.job.jvm.numtasks 1
127+mapreduce.job.map.output.collector.class org.apache.hadoop.mapred.MapTask$MapOutputBuffer
128+mapreduce.job.maps 2
129+mapreduce.job.maxtaskfailures.per.tracker 3
130+mapreduce.job.queuename default
131+mapreduce.job.reduce.shuffle.consumer.plugin.class org.apache.hadoop.mapreduce.task.reduce.Shuffle
132+mapreduce.job.reduce.slowstart.completedmaps 0.05
133+mapreduce.job.reduces 1
134+mapreduce.job.speculative.slownodethreshold 1
135+mapreduce.job.speculative.slowtaskthreshold 1
136+mapreduce.job.speculative.speculativecap 0.1
137+mapreduce.job.split.metainfo.maxsize 10000000
138+mapreduce.job.token.tracking.ids.enabled false
139+mapreduce.job.ubertask.enable false
140+mapreduce.job.ubertask.maxmaps 9
141+mapreduce.job.ubertask.maxreduces 1
142+mapreduce.job.userlog.retain.hours 24
143+mapreduce.jobhistory.address 0.0.0.0:10020
144+mapreduce.jobhistory.cleaner.enable true
145+mapreduce.jobhistory.cleaner.interval-ms 86400000
146+mapreduce.jobhistory.client.thread-count 10
147+mapreduce.jobhistory.datestring.cache.size 200000
148+mapreduce.jobhistory.done-dir ${yarn.app.mapreduce.am.staging-dir}/history/done
149+mapreduce.jobhistory.http.policy HTTP_ONLY
150+mapreduce.jobhistory.intermediate-done-dir ${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate
151+mapreduce.jobhistory.joblist.cache.size 20000
152+mapreduce.jobhistory.keytab /etc/security/keytab/jhs.service.keytab
153+mapreduce.jobhistory.loadedjobs.cache.size 5
154+mapreduce.jobhistory.max-age-ms 604800000
155+mapreduce.jobhistory.minicluster.fixed.ports false
156+mapreduce.jobhistory.move.interval-ms 180000
157+mapreduce.jobhistory.move.thread-count 3
158+mapreduce.jobhistory.principal jhs/_HOST@REALM.TLD
159+mapreduce.jobhistory.webapp.address 0.0.0.0:19888
160+mapreduce.jobtracker.address local
161+mapreduce.jobtracker.expire.trackers.interval 600000
162+mapreduce.jobtracker.handler.count 10
163+mapreduce.jobtracker.heartbeats.in.second 100
164+mapreduce.jobtracker.http.address 0.0.0.0:50030
165+mapreduce.jobtracker.instrumentation org.apache.hadoop.mapred.JobTrackerMetricsInst
166+mapreduce.jobtracker.jobhistory.block.size 3145728
167+mapreduce.jobtracker.jobhistory.lru.cache.size 5
168+mapreduce.jobtracker.jobhistory.task.numberprogresssplits 12
169+mapreduce.jobtracker.maxtasks.perjob -1
170+mapreduce.jobtracker.persist.jobstatus.active true
171+mapreduce.jobtracker.persist.jobstatus.dir /jobtracker/jobsInfo
172+mapreduce.jobtracker.persist.jobstatus.hours 1
173+mapreduce.jobtracker.restart.recover false
174+mapreduce.jobtracker.retiredjobs.cache.size 1000
175+mapreduce.jobtracker.staging.root.dir ${hadoop.tmp.dir}/mapred/staging
176+mapreduce.jobtracker.system.dir ${hadoop.tmp.dir}/mapred/system
177+mapreduce.jobtracker.taskcache.levels 2
178+mapreduce.jobtracker.taskscheduler org.apache.hadoop.mapred.JobQueueTaskScheduler
179+mapreduce.jobtracker.tasktracker.maxblacklists 4
180+mapreduce.local.clientfactory.class.name org.apache.hadoop.mapred.LocalClientFactory
181+mapreduce.map.cpu.vcores 1
182+mapreduce.map.log.level INFO
183+mapreduce.map.maxattempts 4
184+mapreduce.map.output.compress false
185+mapreduce.map.output.compress.codec org.apache.hadoop.io.compress.DefaultCodec
186+mapreduce.map.skip.maxrecords 0
187+mapreduce.map.skip.proc.count.autoincr true
188+mapreduce.map.sort.spill.percent 0.8
189+mapreduce.map.speculative true
190+mapreduce.output.fileoutputformat.compress false
191+mapreduce.output.fileoutputformat.compress.codec org.apache.hadoop.io.compress.DefaultCodec
192+mapreduce.output.fileoutputformat.compress.type RECORD
193+mapreduce.reduce.cpu.vcores 1
194+mapreduce.reduce.input.buffer.percent 0
195+mapreduce.reduce.log.level INFO
196+mapreduce.reduce.markreset.buffer.percent 0
197+mapreduce.reduce.maxattempts 4
198+mapreduce.reduce.merge.inmem.threshold 1000
199+mapreduce.reduce.shuffle.connect.timeout 180000
200+mapreduce.reduce.shuffle.input.buffer.percent 0.7
201+mapreduce.reduce.shuffle.memory.limit.percent 0.25
202+mapreduce.reduce.shuffle.merge.percent 0.66
203+mapreduce.reduce.shuffle.parallelcopies 5
204+mapreduce.reduce.shuffle.read.timeout 180000
205+mapreduce.reduce.shuffle.retry-delay.max.ms 60000
206+mapreduce.reduce.skip.maxgroups 0
207+mapreduce.reduce.skip.proc.count.autoincr true
208+mapreduce.reduce.speculative true
209+mapreduce.shuffle.max.connections 0
210+mapreduce.shuffle.port 13562
211+mapreduce.shuffle.ssl.enabled false
212+mapreduce.shuffle.ssl.file.buffer.size 65536
213+mapreduce.task.files.preserve.failedtasks false
214+mapreduce.task.io.sort.factor 10
215+mapreduce.task.io.sort.mb 100
216+mapreduce.task.merge.progress.records 10000
217+mapreduce.task.profile false
218+mapreduce.task.profile.maps 0-2
219+mapreduce.task.profile.reduces 0-2
220+mapreduce.task.skip.start.attempts 2
221+mapreduce.task.timeout 600000
222+mapreduce.task.tmp.dir ./tmp
223+mapreduce.task.userlog.limit.kb 0
224+mapreduce.tasktracker.dns.interface default
225+mapreduce.tasktracker.dns.nameserver default
226+mapreduce.tasktracker.healthchecker.interval 60000
227+mapreduce.tasktracker.healthchecker.script.timeout 600000
228+mapreduce.tasktracker.http.address 0.0.0.0:50060
229+mapreduce.tasktracker.http.threads 40
230+mapreduce.tasktracker.indexcache.mb 10
231+mapreduce.tasktracker.instrumentation org.apache.hadoop.mapred.TaskTrackerMetricsInst
232+mapreduce.tasktracker.local.dir.minspacekill 0
233+mapreduce.tasktracker.local.dir.minspacestart 0
234+mapreduce.tasktracker.map.tasks.maximum 2
235+mapreduce.tasktracker.outofband.heartbeat false
236+mapreduce.tasktracker.reduce.tasks.maximum 2
237+mapreduce.tasktracker.report.address 127.0.0.1:0
238+mapreduce.tasktracker.taskcontroller org.apache.hadoop.mapred.DefaultTaskController
239+mapreduce.tasktracker.taskmemorymanager.monitoringinterval 5000
240+mapreduce.tasktracker.tasks.sleeptimebeforesigkill 5000
241+net.topology.impl org.apache.hadoop.net.NetworkTopology
242+net.topology.node.switch.mapping.impl org.apache.hadoop.net.ScriptBasedMapping
243+net.topology.script.number.args 100
244+nfs3.mountd.port 4242
245+nfs3.server.port 2049
246+rpc.engine.org.apache.hadoop.ipc.ProtocolMetaInfoPB org.apache.hadoop.ipc.ProtobufRpcEngine
247+rpc.engine.org.apache.hadoop.yarn.api.ApplicationClientProtocolPB org.apache.hadoop.ipc.ProtobufRpcEngine
248+rpc.engine.org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB org.apache.hadoop.ipc.ProtobufRpcEngine
249+rpc.engine.org.apache.hadoop.yarn.server.api.ResourceTrackerPB org.apache.hadoop.ipc.ProtobufRpcEngine
250+s3.blocksize 67108864
251+s3.bytes-per-checksum 512
252+s3.client-write-packet-size 65536
253+s3.replication 3
254+s3.stream-buffer-size 4096
255+s3native.blocksize 67108864
256+s3native.bytes-per-checksum 512
257+s3native.client-write-packet-size 65536
258+s3native.replication 3
259+s3native.stream-buffer-size 4096
260+tfile.fs.input.buffer.size 262144
261+tfile.fs.output.buffer.size 262144
262+tfile.io.chunk.size 1048576
263+yarn.acl.enable true
264+yarn.admin.acl *
265+yarn.am.liveness-monitor.expiry-interval-ms 600000
266+yarn.app.mapreduce.am.command-opts -Xmx1024m
267+yarn.app.mapreduce.am.job.committer.cancel-timeout 60000
268+yarn.app.mapreduce.am.job.committer.commit-window 10000
269+yarn.app.mapreduce.am.job.task.listener.thread-count 30
270+yarn.app.mapreduce.am.resource.cpu-vcores 1
271+yarn.app.mapreduce.am.resource.mb 1536
272+yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms 1000
273+yarn.app.mapreduce.am.staging-dir /tmp/hadoop-yarn/staging
274+yarn.app.mapreduce.client-am.ipc.max-retries 3
275+yarn.app.mapreduce.client.max-retries 3
276+yarn.application.classpath $HADOOP_CONF_DIR, $HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*, $HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*, $HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*, $HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*
277+yarn.client.app-submission.poll-interval 1000
278+yarn.client.max-nodemanagers-proxies 500
279+yarn.client.nodemanager-client-async.thread-pool-max-size 500
280+yarn.dispatcher.exit-on-error true
281+yarn.http.policy HTTP_ONLY
282+yarn.ipc.rpc.class org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC
283+yarn.ipc.serializer.type protocolbuffers
284+yarn.log-aggregation-enable true
285+yarn.log-aggregation.retain-check-interval-seconds -1
286+yarn.log-aggregation.retain-seconds -1
287+yarn.nm.liveness-monitor.expiry-interval-ms 600000
288+yarn.nodemanager.address ${yarn.nodemanager.hostname}:0
289+yarn.nodemanager.admin-env MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
290+yarn.nodemanager.aux-services mapreduce_shuffle
291+yarn.nodemanager.aux-services.mapreduce_shuffle.class org.apache.hadoop.mapred.ShuffleHandler
292+yarn.nodemanager.container-executor.class org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor
293+yarn.nodemanager.container-manager.thread-count 20
294+yarn.nodemanager.container-monitor.interval-ms 3000
295+yarn.nodemanager.delete.debug-delay-sec 0
296+yarn.nodemanager.delete.thread-count 4
297+yarn.nodemanager.disk-health-checker.interval-ms 120000
298+yarn.nodemanager.disk-health-checker.min-healthy-disks 0.25
299+yarn.nodemanager.env-whitelist JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME
300+yarn.nodemanager.health-checker.interval-ms 600000
301+yarn.nodemanager.health-checker.script.timeout-ms 1200000
302+yarn.nodemanager.hostname 0.0.0.0
303+yarn.nodemanager.keytab /etc/krb5.keytab
304+yarn.nodemanager.linux-container-executor.cgroups.hierarchy /hadoop-yarn
305+yarn.nodemanager.linux-container-executor.cgroups.mount false
306+yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user nobody
307+yarn.nodemanager.linux-container-executor.nonsecure-mode.user-pattern ^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$
308+yarn.nodemanager.linux-container-executor.resources-handler.class org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler
309+yarn.nodemanager.local-cache.max-files-per-directory 8192
310+yarn.nodemanager.local-dirs /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir
311+yarn.nodemanager.localizer.address ${yarn.nodemanager.hostname}:8040
312+yarn.nodemanager.localizer.cache.cleanup.interval-ms 600000
313+yarn.nodemanager.localizer.cache.target-size-mb 10240
314+yarn.nodemanager.localizer.client.thread-count 5
315+yarn.nodemanager.localizer.fetch.thread-count 4
316+yarn.nodemanager.log-aggregation.compression-type none
317+yarn.nodemanager.log-dirs /var/log/hadoop-yarn/containers
318+yarn.nodemanager.log.retain-seconds 10800
319+yarn.nodemanager.pmem-check-enabled true
320+yarn.nodemanager.process-kill-wait.ms 2000
321+yarn.nodemanager.remote-app-log-dir /var/log/hadoop-yarn/apps
322+yarn.nodemanager.remote-app-log-dir-suffix logs
323+yarn.nodemanager.resource.cpu-vcores 8
324+yarn.nodemanager.resource.memory-mb 8192
325+yarn.nodemanager.resourcemanager.connect.retry_interval.secs 30
326+yarn.nodemanager.resourcemanager.connect.wait.secs 900
327+yarn.nodemanager.sleep-delay-before-sigkill.ms 250
328+yarn.nodemanager.vmem-check-enabled true
329+yarn.nodemanager.vmem-pmem-ratio 2.1
330+yarn.nodemanager.webapp.address ${yarn.nodemanager.hostname}:8042
331+yarn.resourcemanager.address localhost:8032
332+yarn.resourcemanager.admin.address localhost:8033
333+yarn.resourcemanager.admin.client.thread-count 1
334+yarn.resourcemanager.am.max-attempts 2
335+yarn.resourcemanager.amliveliness-monitor.interval-ms 1000
336+yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs 86400
337+yarn.resourcemanager.client.thread-count 50
338+yarn.resourcemanager.cluster.id yarn-rm-cluster
339+yarn.resourcemanager.connect.max-wait.ms 900000
340+yarn.resourcemanager.connect.retry-interval.ms 30000
341+yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs 86400
342+yarn.resourcemanager.container.liveness-monitor.interval-ms 600000
343+yarn.resourcemanager.delayed.delegation-token.removal-interval-ms 30000
344+yarn.resourcemanager.fs.state-store.uri ${hadoop.tmp.dir}/yarn/system/rmstore
345+yarn.resourcemanager.ha.admin.address ${yarn.resourcemanager.hostname}:8034
346+yarn.resourcemanager.ha.admin.client.thread-count 1
347+yarn.resourcemanager.ha.automatic-failover.controller.class org.apache.hadoop.yarn.server.resourcemanager.RMFailoverControllerZKImpl
348+yarn.resourcemanager.ha.automatic-failover.enabled false
349+yarn.resourcemanager.ha.automatic-failover.port 8035
350+yarn.resourcemanager.ha.enabled false
351+yarn.resourcemanager.ha.fencer org.apache.hadoop.yarn.server.resourcemanager.ZKStoreNodeFencer
352+yarn.resourcemanager.hostname 0.0.0.0
353+yarn.resourcemanager.keytab /etc/krb5.keytab
354+yarn.resourcemanager.max-completed-applications 10000
355+yarn.resourcemanager.nm.liveness-monitor.interval-ms 1000
356+yarn.resourcemanager.nodemanagers.heartbeat-interval-ms 1000
357+yarn.resourcemanager.recovery.enabled false
358+yarn.resourcemanager.resource-tracker.address localhost:8031
359+yarn.resourcemanager.resource-tracker.client.thread-count 50
360+yarn.resourcemanager.scheduler.address localhost:8030
361+yarn.resourcemanager.scheduler.class org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler
362+yarn.resourcemanager.scheduler.client.thread-count 50
363+yarn.resourcemanager.scheduler.monitor.enable false
364+yarn.resourcemanager.scheduler.monitor.policies org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy
365+yarn.resourcemanager.store.class org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
366+yarn.resourcemanager.webapp.address ${yarn.resourcemanager.hostname}:8088
367+yarn.resourcemanager.webapp.https.address ${yarn.resourcemanager.hostname}:8090
368+yarn.resourcemanager.zk.state-store.acl world:anyone:rwcda
369+yarn.resourcemanager.zk.state-store.num-retries 3
370+yarn.resourcemanager.zk.state-store.parent-path /rmstore
371+yarn.resourcemanager.zk.state-store.timeout.ms 60000
372+yarn.scheduler.maximum-allocation-mb 8192
373+yarn.scheduler.maximum-allocation-vcores 32
374+yarn.scheduler.minimum-allocation-mb 1024
375+yarn.scheduler.minimum-allocation-vcores 1
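
Note: the listing above is the merged default configuration shipped with this tag. A quick way to confirm the effective value of any key once the *-site.xml overrides below are applied is the sketch that follows (it assumes a configured CDH5 / Hadoop 2 client on the PATH):

    # print the effective value of a key (defaults plus site overrides)
    hdfs getconf -confKey yarn.resourcemanager.scheduler.class
    hdfs getconf -confKey mapreduce.job.reduces
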
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/configuration.xsl (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/configuration.xsl (revision 579)
@@ -0,0 +1,24 @@
1+<?xml version="1.0"?>
2+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
3+<xsl:output method="html"/>
4+<xsl:template match="configuration">
5+<html>
6+<body>
7+<table border="1">
8+<tr>
9+ <th>name</th>
10+ <th>value</th>
11+ <th>source</th>
12+</tr>
13+<xsl:for-each select="property">
14+<tr>
15+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
16+ <td><xsl:value-of select="value"/></td>
17+ <td><xsl:value-of select="source"/></td>
18+</tr>
19+</xsl:for-each>
20+</table>
21+</body>
22+</html>
23+</xsl:template>
24+</xsl:stylesheet>
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/hdfs-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/hdfs-site.xml (revision 579)
@@ -0,0 +1,158 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.secondary.namenode.fqdn</name>
22+ <value>localhost</value>
23+ <!-- <value>${this.cluster.name}-cn.${this.domain}</value> -->
24+ </property>
25+
26+ <property>
27+ <name>dfs.namenode.name.dir</name>
28+ <value>file:///grid/vol/0/var/lib/${user.name}/name</value>
29+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/name,file:///export/home/${user.name}/var/lib/name</value> -->
30+ </property>
31+ <property>
32+ <name>dfs.datanode.data.dir</name>
33+ <value>file:///grid/vol/0/var/lib/${user.name}/data</value>
34+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/data,file:///grid/vol/1/var/lib/${user.name}/data</value> -->
35+ </property>
36+ <property>
37+ <name>dfs.namenode.checkpoint.dir</name>
38+ <value>file:///grid/vol/0/var/lib/${user.name}/checkpoint</value>
39+ <!-- <value>file:///grid/vol/0/var/lib/${user.name}/checkpoint,file:///export/home/${user.name}/var/lib/checkpoint</value> -->
40+ </property>
41+ <property>
42+ <name>dfs.replication</name>
43+ <value>1</value>
44+ <!-- <value>3</value> -->
45+ </property>
46+
47+ <property>
48+ <name>dfs.hosts</name>
49+ <value>/etc/hadoop/conf/hosts.include</value>
50+ <description>
51+ Names a file that contains a list of hosts that are permitted to connect to the namenode.
52+ The full pathname of the file must be specified. If the value is empty, all hosts are permitted.
53+ </description>
54+ </property>
55+ <property>
56+ <name>dfs.hosts.exclude</name>
57+ <value>/etc/hadoop/conf/hosts.exclude</value>
58+ <description>
59+ Names a file that contains a list of hosts that are not permitted to connect to the namenode.
60+ The full pathname of the file must be specified. If the value is empty, no hosts are excluded.
61+ </description>
62+ </property>
63+
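
Note: the include/exclude files referenced above drive DataNode (de)commissioning. A minimal sketch, using the paths configured here and a hypothetical hostname:

    # add a node to the exclude list and have the NameNode re-read both files
    echo 'dn01.grid.example.com' >> /etc/hadoop/conf/hosts.exclude
    hdfs dfsadmin -refreshNodes
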
64+ <property>
65+ <name>dfs.namenode.kerberos.principal</name>
66+ <value>hdfs/_HOST@${this.realm}</value>
67+ <!-- _HOST is replaced with the fs.defaultFS's host name -->
68+ <!-- <value>hdfs/${this.namenode.fqdn}@${this.realm}</value> -->
69+ <description>Kerberos principal name for the NameNode</description>
70+ </property>
71+ <property>
72+ <name>dfs.namenode.keytab.file</name>
73+ <value>${this.keytab.dir}/nn.keytab</value>
74+ <description>
75+ Combined keytab file containing the namenode service and host
76+ principals.
77+ </description>
78+ </property>
79+ <property>
80+ <name>dfs.secondary.namenode.kerberos.principal</name>
81+ <value>hdfs/${this.secondary.namenode.fqdn}@${this.realm}</value>
82+ <!-- <value>hdfs/_HOST@${this.realm}</value> -->
83+ <description>
84+ Kerberos principal name for the secondary NameNode.
85+ </description>
86+ </property>
87+ <property>
88+ <name>dfs.secondary.namenode.keytab.file</name>
89+ <value>${this.keytab.dir}/cn.keytab</value>
90+ <description>
91+ Combined keytab file containing the namenode service and host
92+ principals.
93+ </description>
94+ </property>
95+ <property>
96+ <name>dfs.block.access.token.enable</name>
97+ <value>true</value>
98+ <description>
99+ If "true", access tokens are used as capabilities for accessing
100+ datanodes.
101+ If "false", no access tokens are checked on accessing datanodes.
102+ </description>
103+ </property>
104+ <property>
105+ <name>dfs.datanode.kerberos.principal</name>
106+ <value>hdfs/localhost@${this.realm}</value>
107+ <!-- <value>hdfs/_HOST@${this.realm}</value> -->
108+ <description>
109+ The Kerberos principal that the DataNode runs as. "_HOST" is
110+ replaced by the real host name.
111+ </description>
112+ </property>
113+ <property>
114+ <name>dfs.datanode.keytab.file</name>
115+ <value>${this.keytab.dir}/dn.keytab</value>
116+ <description>
117+ The filename of the keytab file for the DataNode.
118+ </description>
119+ </property>
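
Note: the nn/cn/dn keytabs referenced above are not part of this commit and have to be created out of band. A minimal MIT Kerberos sketch (realm and keytab directory come from this tag's core-site.xml; the kadmin invocation itself is an assumption):

    # create the HDFS service principal and export it into the NameNode keytab;
    # cn.keytab and dn.keytab are produced the same way
    kadmin.local -q 'addprinc -randkey hdfs/localhost@LOCALDOMAIN'
    kadmin.local -q 'ktadd -k /grid/etc/keytabs/localhost/nn.keytab hdfs/localhost@LOCALDOMAIN'
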
120+ <property>
121+ <name>dfs.namenode.kerberos.internal.spnego.principal</name>
122+ <value>${dfs.web.authentication.kerberos.principal}</value>
123+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
124+ <!-- _HOST is replaced with dfs.namenode.http-address's host name. -->
125+ </property>
126+ <property>
127+ <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
128+ <value>HTTP/${this.secondary.namenode.fqdn}@${this.realm}</value>
129+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
130+ <!-- _HOST is replaced with dfs.namenode.secondary.http-address's host name. -->
131+ </property>
132+
133+ <property>
134+ <name>dfs.datanode.address</name>
135+ <value>0.0.0.0:1004</value>
136+ </property>
137+ <property>
138+ <name>dfs.datanode.http.address</name>
139+ <value>0.0.0.0:1006</value>
140+ </property>
141+
142+ <property>
143+ <name>dfs.namenode.http-address</name>
144+ <value>${this.namenode.fqdn}:50070</value>
145+ </property>
146+ <property>
147+ <name>dfs.namenode.secondary.http-address</name>
148+ <value>${this.secondary.namenode.fqdn}:50090</value>
149+ </property>
150+ <property>
151+ <name>dfs.web.authentication.kerberos.principal</name>
152+ <value>HTTP/_HOST@${this.realm}</value>
153+ </property>
154+ <property>
155+ <name>dfs.web.authentication.kerberos.keytab</name>
156+ <value>${this.keytab.dir}/HTTP.keytab</value>
157+ </property>
158+</configuration>
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/core-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/core-site.xml (revision 579)
@@ -0,0 +1,142 @@
1+<?xml version="1.0" encoding="UTF-8"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.cluster.name</name>
22+ <value>localhost</value>
23+ <!-- <value>pleiades</value> -->
24+ </property>
25+ <property>
26+ <name>this.domain</name>
27+ <value>localhost</value>
28+ <!-- <value>grid.example.com</value> -->
29+ </property>
30+ <property>
31+ <name>this.realm</name>
32+ <value>LOCALDOMAIN</value>
33+ <!-- <value>GRID.EXAMPLE.COM</value> -->
34+ </property>
35+ <property>
36+ <name>this.keytab.dir</name>
37+ <value>/grid/etc/keytabs/localhost</value>
38+ </property>
39+ <property>
40+ <name>this.namenode.fqdn</name>
41+ <value>localhost</value>
42+ <!-- <value>${this.cluster.name}-nn.${this.domain}</value> -->
43+ </property>
44+
45+ <property>
46+ <name>fs.defaultFS</name>
47+ <value>hdfs://${this.namenode.fqdn}:9000</value>
48+ </property>
49+ <property>
50+ <name>hadoop.tmp.dir</name>
51+ <value>/tmp/hadoop-${user.name}</value>
52+ </property>
53+
54+ <property>
55+ <name>hadoop.security.authentication</name>
56+ <value>kerberos</value>
57+ <description>
58+ Set the authentication for the cluster. Valid values are: simple or
59+ kerberos.
60+ </description>
61+ </property>
62+ <property>
63+ <name>hadoop.security.authorization</name>
64+ <value>true</value>
65+ <description>
66+ Enable authorization for different protocols.
67+ </description>
68+ </property>
69+ <property>
70+ <name>hadoop.security.auth_to_local</name>
71+ <value>
72+ RULE:[2:$1@$0](.*@${this.realm})s/@.*//
73+ RULE:[1:$1@$0](.*@${this.realm})s/@.*//
74+ RULE:[2:$1@$0](hdfs@.*${this.realm})s/.*/hdfs/
75+ RULE:[2:$1@$0](yarn@.*${this.realm})s/.*/yarn/
76+ RULE:[2:$1@$0](mapred@.*${this.realm})s/.*/mapred/
77+ DEFAULT</value>
78+ </property>
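
Note: the auth_to_local rules above can be dry-run from the shell. A sketch (the principal is hypothetical and it assumes this core-site.xml is on the client classpath):

    # show which local user a Kerberos principal is mapped to
    hadoop org.apache.hadoop.security.HadoopKerberosName hdfs/localhost@LOCALDOMAIN
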
79+ <property>
80+ <name>hadoop.security.group.mapping</name>
81+ <value>org.apache.hadoop.security.JniBasedUnixGroupsMapping</value>
82+ </property>
83+ <property>
84+ <name>hadoop.security.groups.cache.secs</name>
85+ <value>14400</value>
86+ </property>
87+ <property>
88+ <name>hadoop.kerberos.kinit.command</name>
89+ <value>/usr/bin/kinit</value>
90+ </property>
91+
92+ <property>
93+ <name>hadoop.http.filter.initializers</name>
94+ <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
95+ <!-- <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value> -->
96+ <description>The name of a class that initializes an input filter for Jetty.
97+    The commented-out StaticUserWebFilter alternative would always return Dr.Who
98+    as the web user when the servlets query for the authenticated user.</description>
99+ </property>
100+ <property>
101+ <name>hadoop.http.authentication.signature.secret.file</name>
102+ <value>/grid/etc/hadoop-http-auth-signature-secret</value>
103+ </property>
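
Note: the signature secret file named above is not part of this commit. A minimal sketch for generating it (ownership and permissions are assumptions, not taken from this tag):

    # create the HTTP authentication signature secret
    dd if=/dev/urandom bs=64 count=1 2>/dev/null | base64 > /grid/etc/hadoop-http-auth-signature-secret
    chmod 440 /grid/etc/hadoop-http-auth-signature-secret
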
104+ <property>
105+ <name>hadoop.http.authentication.cookie.domain</name>
106+ <value>${this.domain}</value>
107+ </property>
108+ <property>
109+ <name>hadoop.http.authentication.type</name>
110+ <value>simple</value>
111+ <description>Defines authentication used for the HTTP web-consoles.
112+ The supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#.
113+    The default value is simple.</description>
114+ </property>
115+ <property>
116+ <name>hadoop.http.authentication.kerberos.principal</name>
117+ <value>HTTP/localhost@${this.realm}</value>
118+ <!-- <value>HTTP/_HOST@${this.realm}</value>
119+ _HOST N/A!: v1.0, HDP1.2; OK: v2.0, CDH3, CDH4 -->
120+ </property>
121+ <property>
122+ <name>hadoop.http.authentication.kerberos.keytab</name>
123+ <value>${this.keytab.dir}/HTTP.keytab</value>
124+ </property>
125+
126+ <property>
127+ <name>hadoop.proxyuser.oozie.hosts</name>
128+ <value>localhost</value>
129+ </property>
130+ <property>
131+ <name>hadoop.proxyuser.oozie.groups</name>
132+ <value>hadoopers</value>
133+ </property>
134+ <property>
135+ <name>hadoop.proxyuser.httpfs.hosts</name>
136+ <value>localhost</value>
137+ </property>
138+ <property>
139+ <name>hadoop.proxyuser.httpfs.groups</name>
140+ <value>hadoopers</value>
141+ </property>
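
Note: changes to the hadoop.proxyuser.* entries above can be picked up without restarting the daemons. A sketch:

    # re-read the proxyuser (impersonation) settings on the NameNode and ResourceManager
    hdfs dfsadmin -refreshSuperUserGroupsConfiguration
    yarn rmadmin -refreshSuperUserGroupsConfiguration
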
142+</configuration>
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/mapred-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/mapred-site.xml (revision 579)
@@ -0,0 +1,89 @@
1+<?xml version="1.0"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+ Licensed under the Apache License, Version 2.0 (the "License");
5+ you may not use this file except in compliance with the License.
6+ You may obtain a copy of the License at
7+
8+ http://www.apache.org/licenses/LICENSE-2.0
9+
10+ Unless required by applicable law or agreed to in writing, software
11+ distributed under the License is distributed on an "AS IS" BASIS,
12+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+ See the License for the specific language governing permissions and
14+ limitations under the License. See accompanying LICENSE file.
15+-->
16+
17+<!-- Put site-specific property overrides in this file. -->
18+
19+<configuration>
20+ <property>
21+ <name>this.jobhistory.fqdn</name>
22+ <value>localhost</value>
23+ <!-- <value>${this.cluster.name}-jt.${this.domain}</value> -->
24+ <!-- <value>${this.cluster.name}-jh.${this.domain}</value> -->
25+ </property>
26+
27+ <property>
28+ <name>mapreduce.framework.name</name>
29+ <value>yarn</value>
30+ <description>The runtime framework for executing MapReduce jobs.
31+ Can be one of local, classic or yarn.
32+ (default: local)
33+ </description>
34+ </property>
35+ <property>
36+ <name>yarn.app.mapreduce.am.staging-dir</name>
37+ <value>/user</value>
38+ </property>
39+ <property>
40+ <name>mapreduce.jobhistory.intermediate-done-dir</name>
41+ <value>/grid/vol/0/var/lib/mapred/history/done_intermediate</value>
42+ <!-- NG: <value>/user</value> -->
43+ </property>
44+ <property>
45+ <name>mapreduce.jobhistory.done-dir</name>
46+ <value>/grid/vol/0/var/lib/mapred/history/done</value>
47+ </property>
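
Note: the staging and JobHistory directories above live in HDFS and must be created up front. A sketch (the mapred:hadoop ownership and 1777 mode follow common practice and are not part of this commit):

    sudo -u hdfs hdfs dfs -mkdir -p /grid/vol/0/var/lib/mapred/history/done_intermediate
    sudo -u hdfs hdfs dfs -mkdir -p /grid/vol/0/var/lib/mapred/history/done
    sudo -u hdfs hdfs dfs -chown -R mapred:hadoop /grid/vol/0/var/lib/mapred
    sudo -u hdfs hdfs dfs -chmod 1777 /grid/vol/0/var/lib/mapred/history/done_intermediate
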
48+
49+ <!-- NOT necessary.
50+ <property>
51+ <name>mapreduce.cluster.local.dir</name>
52+ <value>/grid/vol/0/var/lib/mapred/local</value>
53+ <description>
54+ The local directory where MapReduce stores intermediate data files.
55+ May be a comma-separated list of directories on different devices in order to spread disk i/o.
56+ Directories that do not exist are ignored.
57+ </description>
58+ </property>
59+ -->
60+ <!-- NOT necessary.
61+ <property>
62+ <name>mapreduce.cluster.temp.dir</name>
63+ <value>/grid/vol/0/tmp/mapred</value>
64+ <description>
65+ A shared directory for temporary files.
66+ </description>
67+ </property>
68+ -->
69+
70+ <property>
71+ <name>mapreduce.jobhistory.principal</name>
72+ <value>mapred/${this.jobhistory.fqdn}@${this.realm}</value>
73+ <!-- <value>mapred/_HOST@${this.realm}</value> -->
74+ </property>
75+ <property>
76+ <name>mapreduce.jobhistory.keytab</name>
77+ <value>${this.keytab.dir}/jh.keytab</value>
78+ </property>
79+
80+ <property>
81+ <name>mapreduce.jobhistory.webapp.spnego-principal</name>
82+ <value>HTTP/${this.jobhistory.fqdn}@${this.realm}</value>
83+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
84+ </property>
85+ <property>
86+ <name>mapreduce.jobhistory.webapp.spnego-keytab-file</name>
87+ <value>${this.keytab.dir}/HTTP.keytab</value>
88+ </property>
89+</configuration>
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/yarn-site.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/yarn-site.xml (revision 579)
@@ -0,0 +1,181 @@
1+<?xml version="1.0"?>
2+<!--
3+ Licensed under the Apache License, Version 2.0 (the "License");
4+ you may not use this file except in compliance with the License.
5+ You may obtain a copy of the License at
6+
7+ http://www.apache.org/licenses/LICENSE-2.0
8+
9+ Unless required by applicable law or agreed to in writing, software
10+ distributed under the License is distributed on an "AS IS" BASIS,
11+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+ See the License for the specific language governing permissions and
13+ limitations under the License. See accompanying LICENSE file.
14+-->
15+<configuration>
16+
17+<!-- Site specific YARN configuration properties -->
18+ <property>
19+ <name>this.resourcemanager.fqdn</name>
20+ <value>localhost</value>
21+ <!-- <value>${this.cluster.name}-rm.${this.domain}</value> -->
22+ </property>
23+
24+ <property>
25+ <name>yarn.log-aggregation-enable</name>
26+ <value>true</value>
27+ </property>
28+ <property>
29+ <name>yarn.resourcemanager.scheduler.class</name>
30+ <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
31+ <description>In case you do not want to use the default scheduler</description>
32+ </property>
33+ <property>
34+ <name>yarn.nodemanager.local-dirs</name>
35+ <value>/grid/vol/0/var/lib/${user.name}/nm/local</value>
36+ <!-- <value>/grid/vol/0/var/lib/${user.name}/nm/local,/grid/vol/1/var/lib/${user.name}/nm/local</value> -->
37+ <description>the local directories used by the nodemanager
38+ (default: /tmp/nm-local-dir)</description>
39+ </property>
40+ <property>
41+ <name>yarn.nodemanager.resource.memory-mb</name>
42+ <value>8192</value>
43+ <description>Amount of physical memory, in MB, that can be allocated
44+ for containers. (default: 8192)</description>
45+ </property>
46+ <property>
47+    <name>yarn.nodemanager.resource.cpu-vcores</name>
48+ <value>8</value>
49+ <description>Number of CPU cores that can be allocated
50+ for containers. (default: 8)</description>
51+ </property>
52+ <property>
53+ <name>yarn.nodemanager.remote-app-log-dir</name>
54+ <value>/grid/vol/0/var/log/${user.name}/nm</value>
55+ <description>directory on hdfs where the application logs are moved to
56+ (default: /tmp/logs)</description>
57+ </property>
58+ <property>
59+ <name>yarn.nodemanager.log-dirs</name>
60+ <value>/grid/vol/0/var/log/${user.name}/nm</value>
61+ <!-- <value>/grid/vol/0/var/log/${user.name}/nm,/grid/vol/1/var/log/${user.name}/nm</value> -->
62+ <description>the directories used by Nodemanagers as log directories
63+ (default: /tmp/logs)</description>
64+ </property>
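
Note: the NodeManager local and log directories above are plain local paths that must exist on every worker. A sketch, assuming ${user.name} resolves to yarn for the NodeManager daemon:

    mkdir -p /grid/vol/0/var/lib/yarn/nm/local /grid/vol/0/var/log/yarn/nm
    chown -R yarn:yarn /grid/vol/0/var/lib/yarn /grid/vol/0/var/log/yarn
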
65+ <property>
66+ <name>yarn.nodemanager.aux-services</name>
67+ <value>mapreduce_shuffle</value>
68+ <description>shuffle service that needs to be set for Map Reduce to run</description>
69+ </property>
70+ <property>
71+ <name>yarn.application.classpath</name>
72+ <value>
73+ $HADOOP_CONF_DIR,
74+ $HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
75+ $HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
76+ $HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
77+ $HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*</value>
78+ <description>Classpath for typical applications.</description>
79+ </property>
80+
81+ <property>
82+ <name>yarn.resourcemanager.nodes.include-path</name>
83+ <value>/etc/hadoop/conf/hosts.include</value>
84+ <description>Path to file with nodes to include.</description>
85+ </property>
86+ <property>
87+ <name>yarn.resourcemanager.nodes.exclude-path</name>
88+ <value>/etc/hadoop/conf/hosts.exclude</value>
89+ <description>Path to file with nodes to exclude.</description>
90+ </property>
91+
92+ <property>
93+ <name>yarn.nodemanager.admin-env</name>
94+ <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX,LD_LIBRARY_PATH=${HADOOP_COMMON_HOME}/lib/native</value>
95+ </property>
96+
97+ <property>
98+ <name>yarn.acl.enable</name>
99+ <value>true</value>
100+ </property>
101+ <property>
102+ <name>yarn.admin.acl</name>
103+ <value> yarn,gridops</value>
104+ </property>
105+ <property>
106+ <name>yarn.resourcemanager.principal</name>
107+ <value>yarn/${this.resourcemanager.fqdn}@${this.realm}</value>
108+ <!-- <value>yarn/_HOST@${this.realm}</value> -->
109+ </property>
110+ <property>
111+ <name>yarn.resourcemanager.keytab</name>
112+ <value>${this.keytab.dir}/rm.keytab</value>
113+ </property>
114+ <property>
115+ <name>yarn.nodemanager.principal</name>
116+ <value>yarn/localhost@${this.realm}</value>
117+ <!-- <value>yarn/_HOST@${this.realm}</value> -->
118+ </property>
119+ <property>
120+ <name>yarn.nodemanager.keytab</name>
121+ <value>${this.keytab.dir}/nm.keytab</value>
122+ </property>
123+
124+ <property>
125+ <name>yarn.nodemanager.container-executor.class</name>
126+ <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
127+ </property>
128+ <property>
129+ <name>yarn.nodemanager.linux-container-executor.group</name>
130+ <value>yarn</value>
131+ </property>
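
Note: the LinuxContainerExecutor selected above also reads a container-executor.cfg next to the setuid binary. A minimal sketch consistent with the group configured here (the path and the banned.users/min.user.id values are assumptions, not part of this commit):

    cat > /etc/hadoop/conf/container-executor.cfg <<'EOF'
    yarn.nodemanager.linux-container-executor.group=yarn
    banned.users=hdfs,yarn,mapred,bin
    min.user.id=1000
    allowed.system.users=
    EOF
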
132+ <property>
133+ <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
134+ <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
135+ <description>The class which should help the LCE handle resources.</description>
136+ </property>
137+ <property>
138+ <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
139+ <value>/hadoop-yarn</value>
140+    <description>The cgroups hierarchy under which to place YARN processes (cannot contain commas).
141+ If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have
142+ been pre-configured), then this cgroups hierarchy must already exist and be writable by the
143+ NodeManager user, otherwise the NodeManager may fail.
144+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
145+ </property>
146+ <property>
147+ <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
148+ <value>false</value>
149+ <description>Whether the LCE should attempt to mount cgroups if not found.
150+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description>
151+ </property>
152+ <property>
153+ <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
154+ <value></value>
155+ <description>Where the LCE should attempt to mount cgroups if not found. Common locations
156+ include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux
157+ distribution in use. This path must exist before the NodeManager is launched.
158+ Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and
159+ yarn.nodemanager.linux-container-executor.cgroups.mount is true.</description>
160+ </property>
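
Note: the cgroups settings above only take effect if the resources handler is switched to CgroupsLCEResourcesHandler; with mount left at false, the /hadoop-yarn hierarchy must be pre-created and writable by the NodeManager user. A sketch (the /sys/fs/cgroup/cpu mount point is an assumption):

    mkdir -p /sys/fs/cgroup/cpu/hadoop-yarn
    chown -R yarn:yarn /sys/fs/cgroup/cpu/hadoop-yarn
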
161+
162+ <property>
163+ <name>yarn.resourcemanager.webapp.spnego-principal</name>
164+ <value>HTTP/${this.resourcemanager.fqdn}@${this.realm}</value>
165+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
166+ </property>
167+ <property>
168+ <name>yarn.resourcemanager.webapp.spnego-keytab-file</name>
169+ <value>${this.keytab.dir}/HTTP.keytab</value>
170+ </property>
171+ <property>
172+ <name>yarn.nodemanager.webapp.spnego-principal</name>
173+ <value>HTTP/localhost@${this.realm}</value>
174+ <!-- <value>HTTP/_HOST@${this.realm}</value> -->
175+ </property>
176+ <property>
177+ <name>yarn.nodemanager.webapp.spnego-keytab-file</name>
178+ <value>${this.keytab.dir}/HTTP.keytab</value>
179+ </property>
180+</configuration>
181+
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/fair-scheduler.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/fair-scheduler.xml (revision 579)
@@ -0,0 +1,12 @@
1+<?xml version="1.0"?>
2+
3+<!--
4+ This file contains pool and user allocations for the Fair Scheduler.
5+ Its format is explained in the Fair Scheduler documentation at
6+ http://hadoop.apache.org/common/docs/r0.20.205.0/fair_scheduler.html.
7+ The documentation also includes a sample config file.
8+-->
9+
10+<allocations>
11+
12+</allocations>
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/hadoop-policy.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/hadoop-policy.xml (revision 579)
@@ -0,0 +1,219 @@
1+<?xml version="1.0"?>
2+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3+<!--
4+
5+ Copyright 2011 The Apache Software Foundation
6+
7+ Licensed to the Apache Software Foundation (ASF) under one
8+ or more contributor license agreements. See the NOTICE file
9+ distributed with this work for additional information
10+ regarding copyright ownership. The ASF licenses this file
11+ to you under the Apache License, Version 2.0 (the
12+ "License"); you may not use this file except in compliance
13+ with the License. You may obtain a copy of the License at
14+
15+ http://www.apache.org/licenses/LICENSE-2.0
16+
17+ Unless required by applicable law or agreed to in writing, software
18+ distributed under the License is distributed on an "AS IS" BASIS,
19+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20+ See the License for the specific language governing permissions and
21+ limitations under the License.
22+
23+-->
24+
25+<!-- Put site-specific property overrides in this file. -->
26+
27+<configuration>
28+ <property>
29+ <name>security.client.protocol.acl</name>
30+ <value>*</value>
31+ <description>ACL for ClientProtocol, which is used by user code
32+ via the DistributedFileSystem.
33+ The ACL is a comma-separated list of user and group names. The user and
34+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
35+ A special value of "*" means all users are allowed.</description>
36+ </property>
37+
38+ <property>
39+ <name>security.client.datanode.protocol.acl</name>
40+ <value>*</value>
41+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
42+ for block recovery.
43+ The ACL is a comma-separated list of user and group names. The user and
44+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
45+ A special value of "*" means all users are allowed.</description>
46+ </property>
47+
48+ <property>
49+ <name>security.datanode.protocol.acl</name>
50+ <value>*</value>
51+ <description>ACL for DatanodeProtocol, which is used by datanodes to
52+ communicate with the namenode.
53+ The ACL is a comma-separated list of user and group names. The user and
54+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
55+ A special value of "*" means all users are allowed.</description>
56+ </property>
57+
58+ <property>
59+ <name>security.inter.datanode.protocol.acl</name>
60+ <value>*</value>
61+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
62+ for updating generation timestamp.
63+ The ACL is a comma-separated list of user and group names. The user and
64+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
65+ A special value of "*" means all users are allowed.</description>
66+ </property>
67+
68+ <property>
69+ <name>security.namenode.protocol.acl</name>
70+ <value>*</value>
71+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
72+ namenode to communicate with the namenode.
73+ The ACL is a comma-separated list of user and group names. The user and
74+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
75+ A special value of "*" means all users are allowed.</description>
76+ </property>
77+
78+ <property>
79+ <name>security.admin.operations.protocol.acl</name>
80+ <value>*</value>
81+ <description>ACL for AdminOperationsProtocol. Used for admin commands.
82+ The ACL is a comma-separated list of user and group names. The user and
83+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
84+ A special value of "*" means all users are allowed.</description>
85+ </property>
86+
87+ <property>
88+ <name>security.refresh.usertogroups.mappings.protocol.acl</name>
89+ <value>*</value>
90+ <description>ACL for RefreshUserMappingsProtocol. Used to refresh
91+ users mappings. The ACL is a comma-separated list of user and
92+ group names. The user and group list is separated by a blank. For
93+ e.g. "alice,bob users,wheel". A special value of "*" means all
94+ users are allowed.</description>
95+ </property>
96+
97+ <property>
98+ <name>security.refresh.policy.protocol.acl</name>
99+ <value>*</value>
100+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
101+ dfsadmin and mradmin commands to refresh the security policy in-effect.
102+ The ACL is a comma-separated list of user and group names. The user and
103+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
104+ A special value of "*" means all users are allowed.</description>
105+ </property>
106+
107+ <property>
108+ <name>security.ha.service.protocol.acl</name>
109+ <value>*</value>
110+ <description>ACL for HAService protocol used by HAAdmin to manage the
111+ active and stand-by states of namenode.</description>
112+ </property>
113+
114+ <property>
115+ <name>security.zkfc.protocol.acl</name>
116+ <value>*</value>
117+ <description>ACL for access to the ZK Failover Controller
118+ </description>
119+ </property>
120+
121+ <property>
122+ <name>security.qjournal.service.protocol.acl</name>
123+ <value>*</value>
124+ <description>ACL for QJournalProtocol, used by the NN to communicate with
125+ JNs when using the QuorumJournalManager for edit logs.</description>
126+ </property>
127+
128+ <property>
129+ <name>security.mrhs.client.protocol.acl</name>
130+ <value>*</value>
131+ <description>ACL for HSClientProtocol, used by job clients to
132+    communicate with the MR History Server to query job status etc.
133+ The ACL is a comma-separated list of user and group names. The user and
134+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
135+ A special value of "*" means all users are allowed.</description>
136+ </property>
137+
138+ <!-- YARN Protocols -->
139+
140+ <property>
141+ <name>security.resourcetracker.protocol.acl</name>
142+ <value>*</value>
143+ <description>ACL for ResourceTrackerProtocol, used by the
144+ ResourceManager and NodeManager to communicate with each other.
145+ The ACL is a comma-separated list of user and group names. The user and
146+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
147+ A special value of "*" means all users are allowed.</description>
148+ </property>
149+
150+ <property>
151+ <name>security.resourcemanager-administration.protocol.acl</name>
152+ <value>*</value>
153+ <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
154+ The ACL is a comma-separated list of user and group names. The user and
155+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
156+ A special value of "*" means all users are allowed.</description>
157+ </property>
158+
159+ <property>
160+ <name>security.applicationclient.protocol.acl</name>
161+ <value>*</value>
162+ <description>ACL for ApplicationClientProtocol, used by the ResourceManager
163+ and applications submission clients to communicate with each other.
164+ The ACL is a comma-separated list of user and group names. The user and
165+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
166+ A special value of "*" means all users are allowed.</description>
167+ </property>
168+
169+ <property>
170+ <name>security.applicationmaster.protocol.acl</name>
171+ <value>*</value>
172+ <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
173+ and ApplicationMasters to communicate with each other.
174+ The ACL is a comma-separated list of user and group names. The user and
175+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
176+ A special value of "*" means all users are allowed.</description>
177+ </property>
178+
179+ <property>
180+ <name>security.containermanagement.protocol.acl</name>
181+ <value>*</value>
182+ <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
183+ and ApplicationMasters to communicate with each other.
184+ The ACL is a comma-separated list of user and group names. The user and
185+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
186+ A special value of "*" means all users are allowed.</description>
187+ </property>
188+
189+ <property>
190+ <name>security.resourcelocalizer.protocol.acl</name>
191+ <value>*</value>
192+ <description>ACL for ResourceLocalizer protocol, used by the NodeManager
193+ and ResourceLocalizer to communicate with each other.
194+ The ACL is a comma-separated list of user and group names. The user and
195+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
196+ A special value of "*" means all users are allowed.</description>
197+ </property>
198+
199+ <property>
200+ <name>security.job.task.protocol.acl</name>
201+ <value>*</value>
202+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
203+ tasks to communicate with the parent tasktracker.
204+ The ACL is a comma-separated list of user and group names. The user and
205+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
206+ A special value of "*" means all users are allowed.</description>
207+ </property>
208+
209+ <property>
210+ <name>security.job.client.protocol.acl</name>
211+ <value>*</value>
212+ <description>ACL for MRClientProtocol, used by job clients to
213+    communicate with the MR ApplicationMaster to query job status etc.
214+ The ACL is a comma-separated list of user and group names. The user and
215+ group list is separated by a blank. For e.g. "alice,bob users,wheel".
216+ A special value of "*" means all users are allowed.</description>
217+ </property>
218+
219+</configuration>
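
Note: edits to the service-level ACLs in this file can be applied to running daemons without a restart. A sketch:

    # re-read hadoop-policy.xml on the NameNode and the ResourceManager
    hdfs dfsadmin -refreshServiceAcl
    yarn rmadmin -refreshServiceAcl
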
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/mapred-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/mapred-env.sh (revision 579)
@@ -0,0 +1,25 @@
1+# Licensed to the Apache Software Foundation (ASF) under one or more
2+# contributor license agreements. See the NOTICE file distributed with
3+# this work for additional information regarding copyright ownership.
4+# The ASF licenses this file to You under the Apache License, Version 2.0
5+# (the "License"); you may not use this file except in compliance with
6+# the License. You may obtain a copy of the License at
7+#
8+# http://www.apache.org/licenses/LICENSE-2.0
9+#
10+# Unless required by applicable law or agreed to in writing, software
11+# distributed under the License is distributed on an "AS IS" BASIS,
12+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+# See the License for the specific language governing permissions and
14+# limitations under the License.
15+
16+
17+export JAVA_HOME=/usr/local/jvm/java-7-ora
18+
19+# The directory where pid files are stored. /tmp by default.
20+export HADOOP_MAPRED_PID_DIR=/var/run/hadoop-mapreduce
21+# Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.
22+export HADOOP_MAPRED_LOG_DIR=/grid/vol/0/var/log/${USER}
23+
24+
25+
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/yarn-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/yarn-env.sh (revision 579)
@@ -0,0 +1,123 @@
1+# Licensed to the Apache Software Foundation (ASF) under one or more
2+# contributor license agreements. See the NOTICE file distributed with
3+# this work for additional information regarding copyright ownership.
4+# The ASF licenses this file to You under the Apache License, Version 2.0
5+# (the "License"); you may not use this file except in compliance with
6+# the License. You may obtain a copy of the License at
7+#
8+# http://www.apache.org/licenses/LICENSE-2.0
9+#
10+# Unless required by applicable law or agreed to in writing, software
11+# distributed under the License is distributed on an "AS IS" BASIS,
12+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+# See the License for the specific language governing permissions and
14+# limitations under the License.
15+
16+
17+export JAVA_HOME=/usr/local/jvm/java-7-ora
18+
19+export YARN_LOG_DIR=/grid/vol/0/var/log/${USER}
20+# Do not set $YARN_PID_DIR in this file! -> /etc/default/hadoop-*
21+# (/var/run/hadoop-yarn on the YARN daemons,
22+# /var/run/hadoop-mapreduce on the HistoryServer)
23+#export YARN_PID_DIR=/var/run/hadoop-yarn
24+export MAPRED_LOG_DIR=/grid/vol/0/var/log/mapred
25+
26+
27+# User for YARN daemons
28+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
29+
30+# resolve links - $0 may be a softlink
31+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
32+
33+# some Java parameters
34+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
35+if [ "$JAVA_HOME" != "" ]; then
36+ #echo "run java in $JAVA_HOME"
37+ JAVA_HOME=$JAVA_HOME
38+fi
39+
40+if [ "$JAVA_HOME" = "" ]; then
41+ echo "Error: JAVA_HOME is not set."
42+ exit 1
43+fi
44+
45+JAVA=$JAVA_HOME/bin/java
46+JAVA_HEAP_MAX=-Xmx1000m
47+
48+# For setting YARN specific HEAP sizes please use this
49+# Parameter and set appropriately
50+# YARN_HEAPSIZE=1000
51+
52+# check envvars which might override default args
53+if [ "$YARN_HEAPSIZE" != "" ]; then
54+ JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
55+fi
56+
57+# Resource Manager specific parameters
58+
59+# Specify the max Heapsize for the ResourceManager using a numerical value
60+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
61+# the value to 1000.
62+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
63+# and/or YARN_RESOURCEMANAGER_OPTS.
64+# If not specified, the default value will be picked from either YARN_HEAPMAX
65+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
66+#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
67+
68+# Specify the JVM options to be used when starting the ResourceManager.
69+# These options will be appended to the options specified as YARN_OPTS
70+# and therefore may override any similar flags set in YARN_OPTS
71+#export YARN_RESOURCEMANAGER_OPTS=
72+
73+# Node Manager specific parameters
74+
75+# Specify the max Heapsize for the NodeManager using a numerical value
76+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
77+# the value to 1000.
78+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
79+# and/or YARN_NODEMANAGER_OPTS.
80+# If not specified, the default value will be picked from either YARN_HEAPMAX
81+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
82+#export YARN_NODEMANAGER_HEAPSIZE=1000
83+
84+# Specify the JVM options to be used when starting the NodeManager.
85+# These options will be appended to the options specified as YARN_OPTS
86+# and therefore may override any similar flags set in YARN_OPTS
87+#export YARN_NODEMANAGER_OPTS=
88+
89+# so that filenames w/ spaces are handled correctly in loops below
90+IFS=
91+
92+
93+# default log directory & file
94+if [ "$YARN_LOG_DIR" = "" ]; then
95+ YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
96+fi
97+if [ "$YARN_LOGFILE" = "" ]; then
98+ YARN_LOGFILE='yarn.log'
99+fi
100+
101+# default policy file for service-level authorization
102+if [ "$YARN_POLICYFILE" = "" ]; then
103+ YARN_POLICYFILE="hadoop-policy.xml"
104+fi
105+
106+# restore ordinary behaviour
107+unset IFS
108+
109+
110+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
111+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
112+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
113+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
114+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
115+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
116+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
117+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
118+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
119+ YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
120+fi
121+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
122+
123+
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/capacity-scheduler.xml (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/capacity-scheduler.xml (revision 579)
@@ -0,0 +1,127 @@
1+<!--
2+ Licensed under the Apache License, Version 2.0 (the "License");
3+ you may not use this file except in compliance with the License.
4+ You may obtain a copy of the License at
5+
6+ http://www.apache.org/licenses/LICENSE-2.0
7+
8+ Unless required by applicable law or agreed to in writing, software
9+ distributed under the License is distributed on an "AS IS" BASIS,
10+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+ See the License for the specific language governing permissions and
12+ limitations under the License. See accompanying LICENSE file.
13+-->
14+<configuration>
15+
16+ <property>
17+ <name>yarn.scheduler.capacity.maximum-applications</name>
18+ <value>10000</value>
19+ <description>
20+ Maximum number of applications that can be pending and running.
21+ </description>
22+ </property>
23+
24+ <property>
25+ <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
26+ <value>0.1</value>
27+ <description>
28+ Maximum percent of resources in the cluster which can be used to run
29+      application masters, i.e. it controls the number of concurrently running
30+      applications.
31+ </description>
32+ </property>
33+
34+ <property>
35+ <name>yarn.scheduler.capacity.resource-calculator</name>
36+ <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
37+ <description>
38+ The ResourceCalculator implementation to be used to compare
39+ Resources in the scheduler.
40+ The default i.e. DefaultResourceCalculator only uses Memory while
41+ DominantResourceCalculator uses dominant-resource to compare
42+ multi-dimensional resources such as Memory, CPU etc.
43+ </description>
44+ </property>
45+
46+ <property>
47+ <name>yarn.scheduler.capacity.root.queues</name>
48+ <value>default</value>
49+ <description>
50+      The queues at this level (root is the root queue).
51+ </description>
52+ </property>
53+
54+ <property>
55+ <name>yarn.scheduler.capacity.root.acl_submit_applications</name>
56+ <value> </value>
57+ <description>
58+ The ACL of who can submit jobs to the root queue.
59+ </description>
60+ </property>
61+
62+ <property>
63+ <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
64+ <value></value>
65+ <description>
66+ The ACL of who can administer jobs on the root queue.
67+ </description>
68+ </property>
69+
70+ <property>
71+ <name>yarn.scheduler.capacity.root.default.capacity</name>
72+ <value>100</value>
73+ <description>Default queue target capacity.</description>
74+ </property>
75+
76+ <property>
77+ <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
78+ <value>1</value>
79+ <description>
80+ Default queue user limit a percentage from 0.0 to 1.0.
81+ </description>
82+ </property>
83+
84+ <property>
85+ <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
86+ <value>100</value>
87+ <description>
88+ The maximum capacity of the default queue.
89+ </description>
90+ </property>
91+
92+ <property>
93+ <name>yarn.scheduler.capacity.root.default.state</name>
94+ <value>RUNNING</value>
95+ <description>
96+ The state of the default queue. State can be one of RUNNING or STOPPED.
97+ </description>
98+ </property>
99+
100+ <property>
101+ <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
102+ <value>*</value>
103+ <description>
104+ The ACL of who can submit jobs to the default queue.
105+ </description>
106+ </property>
107+
108+ <property>
109+ <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
110+ <value> hadoop,gridops</value>
111+ <description>
112+ The ACL of who can administer jobs on the default queue.
113+ </description>
114+ </property>
115+
116+ <property>
117+ <name>yarn.scheduler.capacity.node-locality-delay</name>
118+ <value>-1</value>
119+ <description>
120+ Number of missed scheduling opportunities after which the CapacityScheduler
121+ attempts to schedule rack-local containers.
122+      Typically this should be set to the number of racks in the cluster; this
123+      feature is disabled by default (set to -1).
124+ </description>
125+ </property>
126+
127+</configuration>
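This capacity-scheduler.xml keeps the stock single-queue layout: one queue named default owns 100% of the cluster and accepts submissions from everyone, while queue administration on it is restricted to hadoop,gridops. After editing the queue definitions, the CapacityScheduler can normally pick them up without a ResourceManager restart; a minimal sketch, assuming the yarn and mapred CLIs from this release are on the PATH:

# reload capacity-scheduler.xml into the running ResourceManager
yarn rmadmin -refreshQueues
# confirm the queue layout the scheduler now exposes
mapred queue -list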
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/hadoop-env.sh (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/hadoop-env.sh (revision 579)
@@ -0,0 +1,47 @@
1+# Copyright 2011 The Apache Software Foundation
2+#
3+# Licensed to the Apache Software Foundation (ASF) under one
4+# or more contributor license agreements. See the NOTICE file
5+# distributed with this work for additional information
6+# regarding copyright ownership. The ASF licenses this file
7+# to you under the Apache License, Version 2.0 (the
8+# "License"); you may not use this file except in compliance
9+# with the License. You may obtain a copy of the License at
10+#
11+# http://www.apache.org/licenses/LICENSE-2.0
12+#
13+# Unless required by applicable law or agreed to in writing, software
14+# distributed under the License is distributed on an "AS IS" BASIS,
15+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+# See the License for the specific language governing permissions and
17+# limitations under the License.
18+
19+# Set Hadoop-specific environment variables here.
20+
21+
22+export JAVA_HOME=/usr/local/jvm/java-7-ora
23+# The directory where pid files are stored. /tmp by default.
24+export HADOOP_PID_DIR=/var/run/hadoop-hdfs
25+# Where log files are stored. $HADOOP_PREFIX/logs by default.
26+#export HADOOP_LOG_DIR=/grid/vol/0/var/log/${USER}
27+# for secure datanode. $USER ('root': Apache, HDP; '': CDH)
28+if [ x"$USER" = x'root' -o x"$USER" = x'' ]; then
29+ export HADOOP_LOG_DIR=/grid/vol/0/var/log/hdfs
30+else
31+ export HADOOP_LOG_DIR=/grid/vol/0/var/log/${USER}
32+fi
33+
34+# Extra Java CLASSPATH elements. Optional.
35+if [ x"$HADOOP_CLASSPATH" = x ]; then
36+ export HADOOP_CLASSPATH=""
37+ #export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/grid/usr/commons-daemon-1.0.13/commons-daemon-1.0.13.jar
38+else
39+ # for Hive and HCatalog
40+ export HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:"
41+ #export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/grid/usr/commons-daemon-1.0.13/commons-daemon-1.0.13.jar
42+fi
43+export HADOOP_USER_CLASSPATH_FIRST=false
44+
45+# for hadoop, yarn commands on the CDH
46+export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
47+
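The HADOOP_CLASSPATH branch in this hadoop-env.sh keeps whatever classpath the caller already exported (appending a trailing ':' so Hive/HCatalog entries survive) and otherwise leaves the variable empty. One way to check the effect is to print the resolved classpath; the extra jar path below is purely illustrative:

# resolved classpath with HADOOP_CLASSPATH unset
hadoop classpath
# the same, with a caller-supplied entry carried through (path is hypothetical)
HADOOP_CLASSPATH=/grid/usr/lib/extra.jar hadoop classpath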
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/configuration.xsl (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/hadoop/conf/configuration.xsl (revision 579)
@@ -0,0 +1,40 @@
1+<?xml version="1.0"?>
2+<!--
3+ Licensed to the Apache Software Foundation (ASF) under one or more
4+ contributor license agreements. See the NOTICE file distributed with
5+ this work for additional information regarding copyright ownership.
6+ The ASF licenses this file to You under the Apache License, Version 2.0
7+ (the "License"); you may not use this file except in compliance with
8+ the License. You may obtain a copy of the License at
9+
10+ http://www.apache.org/licenses/LICENSE-2.0
11+
12+ Unless required by applicable law or agreed to in writing, software
13+ distributed under the License is distributed on an "AS IS" BASIS,
14+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+ See the License for the specific language governing permissions and
16+ limitations under the License.
17+-->
18+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
19+<xsl:output method="html"/>
20+<xsl:template match="configuration">
21+<html>
22+<body>
23+<table border="1">
24+<tr>
25+ <td>name</td>
26+ <td>value</td>
27+ <td>description</td>
28+</tr>
29+<xsl:for-each select="property">
30+<tr>
31+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
32+ <td><xsl:value-of select="value"/></td>
33+ <td><xsl:value-of select="description"/></td>
34+</tr>
35+</xsl:for-each>
36+</table>
37+</body>
38+</html>
39+</xsl:template>
40+</xsl:stylesheet>
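configuration.xsl renders a Hadoop configuration file as an HTML table of name/value/description rows. Browsers apply it through the xml-stylesheet directive at the top of each configuration file, but it can also be run offline with any XSLT 1.0 processor; a minimal sketch using xsltproc (not part of this commit, and the input path is only an example):

# render a site file as an HTML table with the stylesheet above
xsltproc etc/hadoop/conf/configuration.xsl etc/hadoop/conf/core-site.xml > core-site.html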
--- hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/security/limits.d (nonexistent)
+++ hadoop_conf/tags/localhost-cdh5/cdh5.0.0s-1/etc/security/limits.d (revision 579)
Added: bugtraq:number
## -0,0 +1 ##
+true
\ No newline at end of property