Posted to commits@ambari.apache.org by yu...@apache.org on 2014/12/01 21:47:12 UTC
ambari git commit: Removing .rej file committed by accident. (yusaku)
Repository: ambari
Updated Branches:
refs/heads/trunk 716b9b783 -> 6de203aad
Removing .rej file committed by accident. (yusaku)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6de203aa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6de203aa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6de203aa
Branch: refs/heads/trunk
Commit: 6de203aad44c5184e8f913b44b0743c645ce64bc
Parents: 716b9b7
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Mon Dec 1 12:46:24 2014 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Mon Dec 1 12:46:24 2014 -0800
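
[A .rej file is a reject hunk that patch or "git apply --reject" writes next to a file when part of a patch fails to apply; running either tool in a working tree makes it easy to stage such leftovers alongside the intended test configs, which is how secured-storm-start.json.rej landed here. Below is a minimal sketch of a check that flags stray reject files before they are committed; the script name and hook wiring are illustrative, not part of Ambari's build.]

#!/usr/bin/env python
# find_rejects.py -- illustrative sketch: flag patch reject files in a tree.
import os
import sys

def find_reject_files(root):
    # Collect *.rej (and *.orig) files that patch / git apply leave behind.
    hits = []
    for dirpath, dirnames, filenames in os.walk(root):
        if '.git' in dirnames:
            dirnames.remove('.git')  # do not descend into git metadata
        for name in filenames:
            if name.endswith(('.rej', '.orig')):
                hits.append(os.path.join(dirpath, name))
    return hits

if __name__ == '__main__':
    root = sys.argv[1] if len(sys.argv) > 1 else '.'
    rejects = find_reject_files(root)
    for path in rejects:
        print(path)
    sys.exit(1 if rejects else 0)  # non-zero exit fails a pre-commit hook

[Run from the repository root, or wired into .git/hooks/pre-commit, a non-zero exit would flag leftovers like the one removed by this commit.]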
----------------------------------------------------------------------
.../2.1/configs/secured-storm-start.json.rej | 527 -------------------
1 file changed, 527 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/6de203aa/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
deleted file mode 100644
index 93981b5..0000000
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
+++ /dev/null
@@ -1,527 +0,0 @@
-***************
-*** 1,262 ****
- {
- "configuration_attributes": {
-- "storm-site": {},
- "hdfs-site": {
- "final": {
-- "dfs.support.append": "true",
- "dfs.namenode.http-address": "true"
- }
-- },
-- "storm-env": {},
- "core-site": {
- "final": {
- "fs.defaultFS": "true"
- }
-- },
-- "hadoop-policy": {},
-- "hdfs-log4j": {},
-- "hadoop-env": {},
-- "zookeeper-env": {},
-- "zookeeper-log4j": {},
- "cluster-env": {}
-- },
- "commandParams": {
-- "command_timeout": "600",
-- "script": "scripts/nimbus.py",
-- "script_type": "PYTHON",
-- "service_package_folder": "HDP/2.1/services/STORM/package",
- "hooks_folder": "HDP/2.0.6/hooks"
-- },
-- "roleCommand": "START",
-- "clusterName": "pacan",
-- "hostname": "c6402.ambari.apache.org",
- "hostLevelParams": {
-- "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-- "ambari_db_rca_password": "mapred",
-- "java_home": "/usr/jdk64/jdk1.7.0_45",
-- "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
-- "jce_name": "UnlimitedJCEPolicyJDK7.zip",
-- "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
-- "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
-- "group_list": "[\"hadoop\",\"users\"]",
-- "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
-- "stack_version": "2.2",
-- "stack_name": "HDP",
-- "db_name": "ambari",
-- "ambari_db_rca_driver": "org.postgresql.Driver",
-- "jdk_name": "jdk-7u45-linux-x64.tar.gz",
-- "ambari_db_rca_username": "mapred",
-- "db_driver_filename": "mysql-connector-java.jar",
-- "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
- "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
-- },
-- "commandType": "EXECUTION_COMMAND",
-- "roleParams": {},
-- "serviceName": "STORM",
-- "role": "NIMBUS",
-- "forceRefreshConfigTags": [],
-- "taskId": 54,
-- "public_hostname": "c6402.ambari.apache.org",
- "configurations": {
- "storm-site": {
-- "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
-- "topology.workers": "1",
-- "drpc.worker.threads": "64",
-- "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
-- "supervisor.heartbeat.frequency.secs": "5",
-- "topology.executor.send.buffer.size": "1024",
-- "drpc.childopts": "-Xmx768m",
-- "nimbus.thrift.port": "6627",
-- "storm.zookeeper.retry.intervalceiling.millis": "30000",
-- "storm.local.dir": "/hadoop/storm",
-- "topology.receiver.buffer.size": "8",
-- "storm.messaging.netty.client_worker_threads": "1",
-- "transactional.zookeeper.root": "/transactional",
-- "topology.skip.missing.kryo.registrations": "false",
-- "worker.heartbeat.frequency.secs": "1",
-- "zmq.hwm": "0",
-- "storm.zookeeper.connection.timeout": "15000",
-- "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
-- "storm.messaging.netty.server_worker_threads": "1",
-- "supervisor.worker.start.timeout.secs": "120",
-- "zmq.threads": "1",
-- "topology.acker.executors": "null",
-- "storm.local.mode.zmq": "false",
-- "topology.max.task.parallelism": "null",
-- "topology.max.error.report.per.interval": "5",
-- "storm.zookeeper.port": "2181",
-- "drpc.queue.size": "128",
-- "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
-- "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
-- "storm.zookeeper.retry.times": "5",
-- "nimbus.monitor.freq.secs": "10",
-- "storm.cluster.mode": "distributed",
-- "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
-- "drpc.invocations.port": "3773",
-- "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
-- "storm.zookeeper.root": "/storm",
-- "logviewer.childopts": "-Xmx128m",
-- "transactional.zookeeper.port": "null",
-- "topology.worker.childopts": "null",
-- "topology.max.spout.pending": "null",
-- "nimbus.cleanup.inbox.freq.secs": "600",
-- "storm.messaging.netty.min_wait_ms": "100",
-- "nimbus.task.timeout.secs": "30",
-- "nimbus.thrift.max_buffer_size": "1048576",
-- "topology.sleep.spout.wait.strategy.time.ms": "1",
-- "topology.optimize": "true",
-- "nimbus.reassign": "true",
-- "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
-- "logviewer.appender.name": "A1",
-- "nimbus.host": "c6402.ambari.apache.org",
-- "ui.port": "8744",
-- "supervisor.slots.ports": "[6700, 6701]",
-- "nimbus.file.copy.expiration.secs": "600",
-- "supervisor.monitor.frequency.secs": "3",
-- "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
-- "transactional.zookeeper.servers": "null",
-- "zmq.linger.millis": "5000",
-- "topology.error.throttle.interval.secs": "10",
-- "topology.worker.shared.thread.pool.size": "4",
-- "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
-- "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
-- "task.heartbeat.frequency.secs": "3",
-- "topology.transfer.buffer.size": "1024",
-- "storm.zookeeper.session.timeout": "20000",
-- "topology.executor.receive.buffer.size": "1024",
-- "topology.stats.sample.rate": "0.05",
-- "topology.fall.back.on.java.serialization": "true",
-- "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
-- "topology.enable.message.timeouts": "true",
-- "storm.messaging.netty.max_wait_ms": "1000",
-- "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
-- "nimbus.supervisor.timeout.secs": "60",
-- "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
-- "nimbus.inbox.jar.expiration.secs": "3600",
-- "drpc.port": "3772",
-- "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
-- "storm.zookeeper.retry.interval": "1000",
-- "storm.messaging.netty.max_retries": "30",
-- "topology.tick.tuple.freq.secs": "null",
-- "drpc.request.timeout.secs": "600",
-- "nimbus.task.launch.secs": "120",
-- "task.refresh.poll.secs": "10",
-- "topology.message.timeout.secs": "30",
-- "storm.messaging.netty.buffer_size": "5242880",
-- "topology.state.synchronization.timeout.secs": "60",
-- "supervisor.worker.timeout.secs": "30",
-- "topology.trident.batch.emit.interval.millis": "500",
-- "topology.builtin.metrics.bucket.size.secs": "60",
-- "logviewer.port": "8000",
- "topology.debug": "false"
-- },
- "hdfs-site": {
-- "dfs.namenode.avoid.write.stale.datanode": "true",
-- "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
-- "dfs.namenode.checkpoint.txns": "1000000",
-- "dfs.block.access.token.enable": "true",
-- "dfs.support.append": "true",
-- "dfs.datanode.address": "0.0.0.0:1019",
-- "dfs.cluster.administrators": " hdfs",
-- "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
-- "dfs.datanode.balance.bandwidthPerSec": "6250000",
-- "dfs.namenode.safemode.threshold-pct": "1.0f",
-- "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
-- "dfs.permissions.enabled": "true",
-- "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
-- "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
-- "dfs.https.port": "50470",
-- "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
-- "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
-- "dfs.blockreport.initialDelay": "120",
-- "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
-- "dfs.blocksize": "134217728",
-- "dfs.client.read.shortcircuit": "true",
-- "dfs.datanode.max.transfer.threads": "1024",
-- "dfs.heartbeat.interval": "3",
-- "dfs.replication": "3",
-- "dfs.namenode.handler.count": "40",
-- "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
-- "fs.permissions.umask-mode": "022",
-- "dfs.namenode.stale.datanode.interval": "30000",
-- "dfs.datanode.ipc.address": "0.0.0.0:8010",
-- "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
-- "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
-- "dfs.datanode.data.dir": "/hadoop/hdfs/data",
-- "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
-- "dfs.webhdfs.enabled": "true",
-- "dfs.datanode.failed.volumes.tolerated": "0",
-- "dfs.namenode.accesstime.precision": "0",
-- "dfs.namenode.avoid.read.stale.datanode": "true",
-- "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
-- "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
-- "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
-- "dfs.datanode.http.address": "0.0.0.0:1022",
-- "dfs.datanode.du.reserved": "1073741824",
-- "dfs.client.read.shortcircuit.streams.cache.size": "4096",
-- "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
-- "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
-- "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
-- "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
-- "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
-- "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
-- "dfs.permissions.superusergroup": "hdfs",
-- "dfs.journalnode.http-address": "0.0.0.0:8480",
-- "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
-- "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
-- "dfs.namenode.write.stale.datanode.ratio": "1.0f",
-- "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
-- "dfs.datanode.data.dir.perm": "750",
-- "dfs.namenode.name.dir.restore": "true",
-- "dfs.replication.max": "50",
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.http.policy": "HTTP_ONLY"
-- },
- "storm-env": {
-- "storm_log_dir": "/var/log/storm",
-- "storm_principal_name": "storm@EXAMPLE.COM",
-- "storm_pid_dir": "/var/run/storm",
-- "storm_user": "storm",
-- "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
-- "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM",
-- "strom_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab",
-- "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab",
-- "storm_keytab": "/etc/security/keytabs/storm.service.keytab",
- "strom_ui_principal_name": "HTTP/_HOST"
-- },
- "core-site": {
-- "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
-- "fs.trash.interval": "360",
-- "hadoop.security.authentication": "kerberos",
-- "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
-- "hadoop.proxyuser.falcon.hosts": "*",
-- "mapreduce.jobtracker.webinterface.trusted": "false",
-- "hadoop.security.authorization": "true",
-- "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
-- "ipc.server.tcpnodelay": "true",
-- "ipc.client.connect.max.retries": "50",
-- "ipc.client.idlethreshold": "8000",
-- "io.file.buffer.size": "131072",
-- "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT",
-- "ipc.client.connection.maxidletime": "30000",
- "hadoop.proxyuser.falcon.groups": "users"
-- },
- "hadoop-policy": {
-- "security.job.client.protocol.acl": "*",
-- "security.job.task.protocol.acl": "*",
-- "security.datanode.protocol.acl": "*",
-- "security.namenode.protocol.acl": "*",
-- "security.client.datanode.protocol.acl": "*",
-- "security.inter.tracker.protocol.acl": "*",
-- "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
-- "security.client.protocol.acl": "*",
-- "security.refresh.policy.protocol.acl": "hadoop",
-- "security.admin.operations.protocol.acl": "hadoop",
- "security.inter.datanode.protocol.acl": "*"
-- },
- "hdfs-log4j": {
- "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.
logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.append
er.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.lo
gger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audi
t=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.
file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#
\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
-- },
- "hadoop-env": {
- "namenode_opt_maxnewsize": "200m",
- "hdfs_log_dir_prefix": "/var/log/hadoop",
---- 1,262 ----
- {
- "configuration_attributes": {
-+ "storm-site": {},
- "hdfs-site": {
- "final": {
-+ "dfs.support.append": "true",
- "dfs.namenode.http-address": "true"
- }
-+ },
-+ "storm-env": {},
- "core-site": {
- "final": {
- "fs.defaultFS": "true"
- }
-+ },
-+ "hadoop-policy": {},
-+ "hdfs-log4j": {},
-+ "hadoop-env": {},
-+ "zookeeper-env": {},
-+ "zookeeper-log4j": {},
- "cluster-env": {}
-+ },
- "commandParams": {
-+ "command_timeout": "600",
-+ "script": "scripts/nimbus.py",
-+ "script_type": "PYTHON",
-+ "service_package_folder": "HDP/2.1/services/STORM/package",
- "hooks_folder": "HDP/2.0.6/hooks"
-+ },
-+ "roleCommand": "START",
-+ "clusterName": "pacan",
-+ "hostname": "c6402.ambari.apache.org",
- "hostLevelParams": {
-+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-+ "ambari_db_rca_password": "mapred",
-+ "java_home": "/usr/jdk64/jdk1.7.0_45",
-+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
-+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
-+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
-+ "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
-+ "group_list": "[\"hadoop\",\"users\"]",
-+ "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
-+ "stack_version": "2.2",
-+ "stack_name": "HDP",
-+ "db_name": "ambari",
-+ "ambari_db_rca_driver": "org.postgresql.Driver",
-+ "jdk_name": "jdk-7u45-linux-x64.tar.gz",
-+ "ambari_db_rca_username": "mapred",
-+ "db_driver_filename": "mysql-connector-java.jar",
-+ "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
- "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
-+ },
-+ "commandType": "EXECUTION_COMMAND",
-+ "roleParams": {},
-+ "serviceName": "STORM",
-+ "role": "NIMBUS",
-+ "forceRefreshConfigTags": [],
-+ "taskId": 54,
-+ "public_hostname": "c6402.ambari.apache.org",
- "configurations": {
- "storm-site": {
-+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
-+ "topology.workers": "1",
-+ "drpc.worker.threads": "64",
-+ "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
-+ "supervisor.heartbeat.frequency.secs": "5",
-+ "topology.executor.send.buffer.size": "1024",
-+ "drpc.childopts": "-Xmx768m",
-+ "nimbus.thrift.port": "6627",
-+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
-+ "storm.local.dir": "/hadoop/storm",
-+ "topology.receiver.buffer.size": "8",
-+ "storm.messaging.netty.client_worker_threads": "1",
-+ "transactional.zookeeper.root": "/transactional",
-+ "topology.skip.missing.kryo.registrations": "false",
-+ "worker.heartbeat.frequency.secs": "1",
-+ "zmq.hwm": "0",
-+ "storm.zookeeper.connection.timeout": "15000",
-+ "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
-+ "storm.messaging.netty.server_worker_threads": "1",
-+ "supervisor.worker.start.timeout.secs": "120",
-+ "zmq.threads": "1",
-+ "topology.acker.executors": "null",
-+ "storm.local.mode.zmq": "false",
-+ "topology.max.task.parallelism": "null",
-+ "topology.max.error.report.per.interval": "5",
-+ "storm.zookeeper.port": "2181",
-+ "drpc.queue.size": "128",
-+ "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
-+ "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
-+ "storm.zookeeper.retry.times": "5",
-+ "nimbus.monitor.freq.secs": "10",
-+ "storm.cluster.mode": "distributed",
-+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
-+ "drpc.invocations.port": "3773",
-+ "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
-+ "storm.zookeeper.root": "/storm",
-+ "logviewer.childopts": "-Xmx128m",
-+ "transactional.zookeeper.port": "null",
-+ "topology.worker.childopts": "null",
-+ "topology.max.spout.pending": "null",
-+ "nimbus.cleanup.inbox.freq.secs": "600",
-+ "storm.messaging.netty.min_wait_ms": "100",
-+ "nimbus.task.timeout.secs": "30",
-+ "nimbus.thrift.max_buffer_size": "1048576",
-+ "topology.sleep.spout.wait.strategy.time.ms": "1",
-+ "topology.optimize": "true",
-+ "nimbus.reassign": "true",
-+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
-+ "logviewer.appender.name": "A1",
-+ "nimbus.host": "c6402.ambari.apache.org",
-+ "ui.port": "8744",
-+ "supervisor.slots.ports": "[6700, 6701]",
-+ "nimbus.file.copy.expiration.secs": "600",
-+ "supervisor.monitor.frequency.secs": "3",
-+ "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
-+ "transactional.zookeeper.servers": "null",
-+ "zmq.linger.millis": "5000",
-+ "topology.error.throttle.interval.secs": "10",
-+ "topology.worker.shared.thread.pool.size": "4",
-+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
-+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
-+ "task.heartbeat.frequency.secs": "3",
-+ "topology.transfer.buffer.size": "1024",
-+ "storm.zookeeper.session.timeout": "20000",
-+ "topology.executor.receive.buffer.size": "1024",
-+ "topology.stats.sample.rate": "0.05",
-+ "topology.fall.back.on.java.serialization": "true",
-+ "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
-+ "topology.enable.message.timeouts": "true",
-+ "storm.messaging.netty.max_wait_ms": "1000",
-+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
-+ "nimbus.supervisor.timeout.secs": "60",
-+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
-+ "nimbus.inbox.jar.expiration.secs": "3600",
-+ "drpc.port": "3772",
-+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
-+ "storm.zookeeper.retry.interval": "1000",
-+ "storm.messaging.netty.max_retries": "30",
-+ "topology.tick.tuple.freq.secs": "null",
-+ "drpc.request.timeout.secs": "600",
-+ "nimbus.task.launch.secs": "120",
-+ "task.refresh.poll.secs": "10",
-+ "topology.message.timeout.secs": "30",
-+ "storm.messaging.netty.buffer_size": "5242880",
-+ "topology.state.synchronization.timeout.secs": "60",
-+ "supervisor.worker.timeout.secs": "30",
-+ "topology.trident.batch.emit.interval.millis": "500",
-+ "topology.builtin.metrics.bucket.size.secs": "60",
-+ "logviewer.port": "8000",
- "topology.debug": "false"
-+ },
- "hdfs-site": {
-+ "dfs.namenode.avoid.write.stale.datanode": "true",
-+ "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
-+ "dfs.namenode.checkpoint.txns": "1000000",
-+ "dfs.block.access.token.enable": "true",
-+ "dfs.support.append": "true",
-+ "dfs.datanode.address": "0.0.0.0:1019",
-+ "dfs.cluster.administrators": " hdfs",
-+ "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
-+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
-+ "dfs.namenode.safemode.threshold-pct": "1.0f",
-+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
-+ "dfs.permissions.enabled": "true",
-+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
-+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
-+ "dfs.https.port": "50470",
-+ "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
-+ "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
-+ "dfs.blockreport.initialDelay": "120",
-+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
-+ "dfs.blocksize": "134217728",
-+ "dfs.client.read.shortcircuit": "true",
-+ "dfs.datanode.max.transfer.threads": "1024",
-+ "dfs.heartbeat.interval": "3",
-+ "dfs.replication": "3",
-+ "dfs.namenode.handler.count": "40",
-+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
-+ "fs.permissions.umask-mode": "022",
-+ "dfs.namenode.stale.datanode.interval": "30000",
-+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
-+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
-+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
-+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
-+ "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
-+ "dfs.webhdfs.enabled": "true",
-+ "dfs.datanode.failed.volumes.tolerated": "0",
-+ "dfs.namenode.accesstime.precision": "0",
-+ "dfs.namenode.avoid.read.stale.datanode": "true",
-+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
-+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
-+ "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
-+ "dfs.datanode.http.address": "0.0.0.0:1022",
-+ "dfs.datanode.du.reserved": "1073741824",
-+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
-+ "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
-+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
-+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
-+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
-+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
-+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
-+ "dfs.permissions.superusergroup": "hdfs",
-+ "dfs.journalnode.http-address": "0.0.0.0:8480",
-+ "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
-+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
-+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
-+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
-+ "dfs.datanode.data.dir.perm": "750",
-+ "dfs.namenode.name.dir.restore": "true",
-+ "dfs.replication.max": "50",
- "dfs.namenode.checkpoint.period": "21600",
- "dfs.http.policy": "HTTP_ONLY"
-+ },
- "storm-env": {
-+ "storm_log_dir": "/var/log/storm",
-+ "storm_principal_name": "storm@EXAMPLE.COM",
-+ "storm_pid_dir": "/var/run/storm",
-+ "storm_user": "storm",
-+ "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
-+ "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM",
-+ "strom_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab",
-+ "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab",
-+ "storm_keytab": "/etc/security/keytabs/storm.service.keytab",
- "strom_ui_principal_name": "HTTP/_HOST"
-+ },
- "core-site": {
-+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
-+ "fs.trash.interval": "360",
-+ "hadoop.security.authentication": "kerberos",
-+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
-+ "hadoop.proxyuser.falcon.hosts": "*",
-+ "mapreduce.jobtracker.webinterface.trusted": "false",
-+ "hadoop.security.authorization": "true",
-+ "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
-+ "ipc.server.tcpnodelay": "true",
-+ "ipc.client.connect.max.retries": "50",
-+ "ipc.client.idlethreshold": "8000",
-+ "io.file.buffer.size": "131072",
-+ "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT",
-+ "ipc.client.connection.maxidletime": "30000",
- "hadoop.proxyuser.falcon.groups": "users"
-+ },
- "hadoop-policy": {
-+ "security.job.client.protocol.acl": "*",
-+ "security.job.task.protocol.acl": "*",
-+ "security.datanode.protocol.acl": "*",
-+ "security.namenode.protocol.acl": "*",
-+ "security.client.datanode.protocol.acl": "*",
-+ "security.inter.tracker.protocol.acl": "*",
-+ "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
-+ "security.client.protocol.acl": "*",
-+ "security.refresh.policy.protocol.acl": "hadoop",
-+ "security.admin.operations.protocol.acl": "hadoop",
- "security.inter.datanode.protocol.acl": "*"
-+ },
- "hdfs-log4j": {
- "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.
logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.append
er.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.lo
gger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audi
t=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.
file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#
\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
-+ },
- "hadoop-env": {
- "namenode_opt_maxnewsize": "200m",
- "hdfs_log_dir_prefix": "/var/log/hadoop",