
hadoop core-default / hdfs-default default configuration

2012-06-28 

Below is the merged core-default / hdfs-default configuration as dumped from one running cluster. A few entries (e.g. fs.default.name and hadoop.tmp.dir) reflect that cluster's own overrides rather than the stock defaults; a sketch of how to print such a dump follows the list.

  dfs.replication.interval=3
  fs.hsftp.impl=org.apache.hadoop.hdfs.HsftpFileSystem
  dfs.safemode.extension=30000
  ipc.server.tcpnodelay=false
  dfs.web.ugi=webuser,webgroup
  fs.checkpoint.dir=${hadoop.tmp.dir}/dfs/namesecondary
  dfs.permissions.supergroup=supergroup
  dfs.datanode.http.address=0.0.0.0:50075
  dfs.replication.min=1
  dfs.https.address=0.0.0.0:50470
  dfs.datanode.dns.nameserver=default
  dfs.http.address=0.0.0.0:50070
  io.bytes.per.checksum=512
  dfs.blockreport.intervalMsec=3600000
  hadoop.util.hash.type=murmur
  dfs.data.dir=${hadoop.tmp.dir}/dfs/data
  fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem
  fs.ramfs.impl=org.apache.hadoop.fs.InMemoryFileSystem
  dfs.block.size=512
  fs.hftp.impl=org.apache.hadoop.hdfs.HftpFileSystem
  fs.checkpoint.period=3600
  dfs.https.client.keystore.resource=ssl-client.xml
  hadoop.logfile.count=10
  dfs.support.append=false
  ipc.client.connection.maxidletime=10000
  io.seqfile.lazydecompress=true
  dfs.datanode.dns.interface=default
  fs.checkpoint.size=67108864
  dfs.max.objects=0
  local.cache.size=10737418240
  fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem
  fs.file.impl=org.apache.hadoop.fs.LocalFileSystem
  fs.kfs.impl=org.apache.hadoop.fs.kfs.KosmosFileSystem
  fs.s3.buffer.dir=${hadoop.tmp.dir}/s3
  dfs.client.block.write.retries=3
  ipc.client.kill.max=10
  dfs.datanode.du.reserved=0
  hadoop.security.authorization=false
  dfs.replication.max=512
  dfs.balance.bandwidthPerSec=1048576
  fs.s3.sleepTimeSeconds=10
  fs.default.name=hdfs://10.0.18.105:54310
  hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory
  topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping
  dfs.datanode.address=0.0.0.0:50010
  dfs.access.time.precision=3600000
  dfs.heartbeat.interval=3
  dfs.replication.considerLoad=true
  dfs.default.chunk.view.size=32768
  io.file.buffer.size=4096
  dfs.https.need.client.auth=false
  dfs.datanode.ipc.address=0.0.0.0:50020
  dfs.blockreport.initialDelay=0
  fs.har.impl.disable.cache=true
  hadoop.native.lib=true
  fs.s3.block.size=67108864
  dfs.replication=2
  io.compression.codecs=org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec
  dfs.https.enable=false
  io.seqfile.compress.blocksize=1000000
  fs.har.impl=org.apache.hadoop.fs.HarFileSystem
  io.mapfile.bloom.error.rate=0.005
  dfs.namenode.decommission.interval=30
  io.skip.checksum.errors=false
  fs.s3.maxRetries=4
  ipc.server.listen.queue.size=128
  fs.trash.interval=0
  fs.s3.impl=org.apache.hadoop.fs.s3.S3FileSystem
  io.seqfile.sorter.recordlimit=1000000
  io.mapfile.bloom.size=1048576
  dfs.namenode.startup=FORMAT
  dfs.namenode.decommission.nodes.per.interval=5
  webinterface.private.actions=false
  dfs.name.edits.dir=${dfs.name.dir}
  hadoop.tmp.dir=/home/dikar/hadoop/tmp
  fs.checkpoint.edits.dir=${fs.checkpoint.dir}
  dfs.safemode.threshold.pct=0.999f
  ipc.client.idlethreshold=4000
  dfs.permissions=true
  dfs.namenode.handler.count=10
  hadoop.logfile.size=10000000
  dfs.namenode.logging.level=info
  dfs.datanode.https.address=0.0.0.0:50475
  dfs.secondary.http.address=0.0.0.0:50090
  topology.script.number.args=100
  dfs.https.server.keystore.resource=ssl-server.xml
  fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem
  dfs.name.dir=${hadoop.tmp.dir}/dfs/name
  io.serializations=org.apache.hadoop.io.serializer.WritableSerialization
  ipc.client.connect.max.retries=10
  ipc.client.tcpnodelay=false
  dfs.datanode.handler.count=3
  dfs.df.interval=60000
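For reference, here is a minimal sketch of how a dump like the one above can be printed. It assumes a Hadoop 0.20.x-era classpath, and the class name DumpConf is made up for illustration. Configuration is Iterable over its raw key/value pairs, which is why the ${...} references in the listing appear unexpanded.

Java code:

    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;

    public class DumpConf {
        public static void main(String[] args) {
            // new Configuration() loads core-default.xml and core-site.xml from
            // the classpath; the hdfs-*.xml resources are normally added by the
            // HDFS daemons themselves, so pull them in explicitly to see dfs.* keys.
            Configuration conf = new Configuration();
            conf.addResource("hdfs-default.xml");
            conf.addResource("hdfs-site.xml");
            for (Map.Entry<String, String> entry : conf) {
                // iteration yields raw values: ${hadoop.tmp.dir} stays unexpanded
                System.out.println(entry.getKey() + "=" + entry.getValue());
            }
        }
    }

Run it with the cluster's conf/ directory on the classpath and you get a listing like the one above, give or take site overrides.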


Look at these defaults and you will notice the importance of one setting:

hadoop.tmp.dir

dfs.name.dir, dfs.data.dir, and fs.checkpoint.dir above all default to subdirectories of ${hadoop.tmp.dir}, which means the NameNode metadata, the DataNode block storage, and the SecondaryNameNode checkpoints all live under it. In other words, tmp really is not temp *_<
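Why does it matter so much? Configuration expands ${...} references at read time, so every path that defaults to a ${hadoop.tmp.dir} subdirectory silently follows wherever that one key points. A minimal sketch of the expansion (TmpDirDemo is a made-up name; the two values mirror the hadoop.tmp.dir and dfs.name.dir entries in the dump):

Java code:

    import org.apache.hadoop.conf.Configuration;

    public class TmpDirDemo {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // mirror two entries from the dump above
            conf.set("dfs.name.dir", "${hadoop.tmp.dir}/dfs/name");
            conf.set("hadoop.tmp.dir", "/home/dikar/hadoop/tmp");
            // get() substitutes ${hadoop.tmp.dir} at read time
            System.out.println(conf.get("dfs.name.dir"));
            // prints: /home/dikar/hadoop/tmp/dfs/name
        }
    }

Left at the stock default of /tmp/hadoop-${user.name}, the NameNode image, edit log, and DataNode blocks would all sit in a directory the OS is free to clean out, which is exactly why this cluster moved it to /home/dikar/hadoop/tmp.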

