云计算课程实验
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

323 lines
13 KiB

  1. # Licensed to the Apache Software Foundation (ASF) under one
  2. # or more contributor license agreements. See the NOTICE file
  3. # distributed with this work for additional information
  4. # regarding copyright ownership. The ASF licenses this file
  5. # to you under the Apache License, Version 2.0 (the
  6. # "License"); you may not use this file except in compliance
  7. # with the License. You may obtain a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS,
  13. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. # See the License for the specific language governing permissions and
  15. # limitations under the License.
  16. # Define some default values that can be overridden by system properties
  17. hadoop.root.logger=INFO,console
  18. hadoop.log.dir=.
  19. hadoop.log.file=hadoop.log
  20. # Define the root logger to the system property "hadoop.root.logger".
  21. log4j.rootLogger=${hadoop.root.logger}, EventCounter
  22. # Logging Threshold
  23. log4j.threshold=ALL
  24. # Null Appender
  25. log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
  26. #
  27. # Rolling File Appender - cap space usage at 5gb.
  28. #
  29. hadoop.log.maxfilesize=256MB
  30. hadoop.log.maxbackupindex=20
  31. log4j.appender.RFA=org.apache.log4j.RollingFileAppender
  32. log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
  33. log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
  34. log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
  35. log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
  36. # Pattern format: Date LogLevel LoggerName LogMessage
  37. log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
  38. # Debugging Pattern format
  39. #log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
  40. #
  41. # Daily Rolling File Appender
  42. #
  43. log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
  44. log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
  45. # Rollover at midnight
  46. log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
  47. log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
  48. # Pattern format: Date LogLevel LoggerName LogMessage
  49. log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
  50. # Debugging Pattern format
  51. #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
  52. #
  53. # console
  54. # Add "console" to the root logger above if you want to use this
  55. #
  56. log4j.appender.console=org.apache.log4j.ConsoleAppender
  57. log4j.appender.console.target=System.err
  58. log4j.appender.console.layout=org.apache.log4j.PatternLayout
  59. log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
  60. #
  61. # TaskLog Appender
  62. #
  63. #Default values
  64. hadoop.tasklog.taskid=null
  65. hadoop.tasklog.iscleanup=false
  66. hadoop.tasklog.noKeepSplits=4
  67. hadoop.tasklog.totalLogFileSize=100
  68. hadoop.tasklog.purgeLogSplits=true
  69. hadoop.tasklog.logsRetainHours=12
  70. log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
  71. log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
  72. log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
  73. log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
  74. log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
  75. log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
  76. #
  77. # HDFS block state change log from block manager
  78. #
  79. # Uncomment the following to log normal block state change
  80. # messages from BlockManager in NameNode.
  81. #log4j.logger.BlockStateChange=DEBUG
  82. #
  83. #Security appender
  84. #
  85. hadoop.security.logger=INFO,NullAppender
  86. hadoop.security.log.maxfilesize=256MB
  87. hadoop.security.log.maxbackupindex=20
  88. log4j.category.SecurityLogger=${hadoop.security.logger}
  89. hadoop.security.log.file=SecurityAuth-${user.name}.audit
  90. log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
  91. log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
  92. log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
  93. log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
  94. log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
  95. log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
  96. #
  97. # Daily Rolling Security appender
  98. #
  99. log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
  100. log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
  101. log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
  102. log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
  103. log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
  104. #
  105. # hadoop configuration logging
  106. #
  107. # Uncomment the following line to turn off configuration deprecation warnings.
  108. # log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
  109. #
  110. # hdfs audit logging
  111. #
  112. hdfs.audit.logger=INFO,NullAppender
  113. hdfs.audit.log.maxfilesize=256MB
  114. hdfs.audit.log.maxbackupindex=20
  115. log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
  116. log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
  117. log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
  118. log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
  119. log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
  120. log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
  121. log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
  122. log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
  123. #
  124. # NameNode metrics logging.
  125. # The default is to retain two namenode-metrics.log files up to 64MB each.
  126. #
  127. namenode.metrics.logger=INFO,NullAppender
  128. log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
  129. log4j.additivity.NameNodeMetricsLog=false
  130. log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
  131. log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
  132. log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
  133. log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
  134. log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
  135. log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
  136. #
  137. # DataNode metrics logging.
  138. # The default is to retain two datanode-metrics.log files up to 64MB each.
  139. #
  140. datanode.metrics.logger=INFO,NullAppender
  141. log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
  142. log4j.additivity.DataNodeMetricsLog=false
  143. log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
  144. log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
  145. log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
  146. log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
  147. log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
  148. log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
  149. #
  150. # mapred audit logging
  151. #
  152. mapred.audit.logger=INFO,NullAppender
  153. mapred.audit.log.maxfilesize=256MB
  154. mapred.audit.log.maxbackupindex=20
  155. log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
  156. log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
  157. log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
  158. log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
  159. log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
  160. log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
  161. log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
  162. log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
  163. # Custom Logging levels
  164. #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
  165. #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
  166. #log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
  167. # Jets3t library
  168. log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
  169. # AWS SDK & S3A FileSystem
  170. log4j.logger.com.amazonaws=ERROR
  171. log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
  172. log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
  173. #
  174. # Event Counter Appender
  175. # Sends counts of logging messages at different severity levels to Hadoop Metrics.
  176. #
  177. log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
  178. #
  179. # Job Summary Appender
  180. #
  181. # Use following logger to send summary to separate file defined by
  182. # hadoop.mapreduce.jobsummary.log.file :
  183. # hadoop.mapreduce.jobsummary.logger=INFO,JSA
  184. #
  185. hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
  186. hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
  187. hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
  188. hadoop.mapreduce.jobsummary.log.maxbackupindex=20
  189. log4j.appender.JSA=org.apache.log4j.RollingFileAppender
  190. log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
  191. log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
  192. log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
  193. log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
  194. log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
  195. log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
  196. log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
  197. #
  198. # shuffle connection log from shuffleHandler
  199. # Uncomment the following line to enable logging of shuffle connections
  200. # log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG
  201. #
  202. # Yarn ResourceManager Application Summary Log
  203. #
  204. # Set the ResourceManager summary log filename
  205. yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
  206. # Set the ResourceManager summary log level and appender
  207. yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
  208. #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
  209. # To enable AppSummaryLogging for the RM,
  210. # set yarn.server.resourcemanager.appsummary.logger to
  211. # <LEVEL>,RMSUMMARY in hadoop-env.sh
  212. # Appender for ResourceManager Application Summary Log
  213. # Requires the following properties to be set
  214. # - hadoop.log.dir (Hadoop Log directory)
  215. # - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
  216. # - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
  217. log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
  218. log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
  219. log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
  220. log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
  221. log4j.appender.RMSUMMARY.MaxFileSize=256MB
  222. log4j.appender.RMSUMMARY.MaxBackupIndex=20
  223. log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
  224. log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
  225. # HS audit log configs
  226. #mapreduce.hs.audit.logger=INFO,HSAUDIT
  227. #log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
  228. #log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
  229. #log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
  230. #log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
  231. #log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
  232. #log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
  233. #log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
  234. # Http Server Request Logs
  235. #log4j.logger.http.requests.namenode=INFO,namenoderequestlog
  236. #log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
  237. #log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
  238. #log4j.appender.namenoderequestlog.RetainDays=3
  239. #log4j.logger.http.requests.datanode=INFO,datanoderequestlog
  240. #log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
  241. #log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
  242. #log4j.appender.datanoderequestlog.RetainDays=3
  243. #log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
  244. #log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
  245. #log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
  246. #log4j.appender.resourcemanagerrequestlog.RetainDays=3
  247. #log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
  248. #log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
  249. #log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
  250. #log4j.appender.jobhistoryrequestlog.RetainDays=3
  251. #log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
  252. #log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
  253. #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
  254. #log4j.appender.nodemanagerrequestlog.RetainDays=3
  255. # WebHdfs request log on datanodes
  256. # Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
  257. # direct the log to a separate file.
  258. #datanode.webhdfs.logger=INFO,console
  259. #log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
  260. #log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
  261. #log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
  262. #log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
  263. #log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
  264. #log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd
  265. # Appender for viewing information for errors and warnings
  266. yarn.ewma.cleanupInterval=300
  267. yarn.ewma.messageAgeLimitSeconds=86400
  268. yarn.ewma.maxUniqueMessages=250
  269. log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
  270. log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
  271. log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
  272. log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}