################################################################################
# Host-specific Hadoop 2.x configuration
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Customize Peel configuration values appearing in
#
# https://github.com/stratosphere/peel/blob/master/peel-extensions/src/main/resources/reference.hadoop-2.x.conf
#
# here.
#
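# Values set in this file override the defaults from the reference
# configuration above: HOCON merges objects key by key, and the later
# definition (this file) wins. A minimal sketch of such an override, using a
# key that also appears below (the value is purely illustrative):
#
#   system.hadoop-2.config.hdfs {
#     dfs.replication = 2
#   }
#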
system {
    hadoop-2 {
        # format HDFS (i.e., re-initialize the NameNode) upon system setup
        format = true
        config {
            # hadoop-env.sh entries
            env {
                # directory where process IDs are stored
                HADOOP_PID_DIR = "/data/peel/hadoop-2/pid"
            }
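            # Peel writes the entries of the env block above into
            # etc/hadoop/hadoop-env.sh; illustratively (a sketch, assuming the
            # usual shell export form), the entry above would become:
            #
            #   export HADOOP_PID_DIR="/data/peel/hadoop-2/pid"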
            # core-site.xml entries
            core {
                fs.default.name = "hdfs://"${runtime.hostname}":45010/"
                # I/O buffer size in bytes (512 KiB)
                io.file.buffer.size = 524288
                hadoop.tmp.dir = "/data/peel/hadoop-2/tmp"
                hadoop.http.staticuser.user = "hadoop"
                # enable the two entries below for client short-circuit reads;
                # use them only with a Hadoop build that ships native libraries
                # for your environment!
                # dfs.client.read.shortcircuit = true
                # dfs.domain.socket.path = "/data/peel/hadoop-2/scr"
            }
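            # Peel resolves HOCON substitutions and renders each entry of the
            # core block into etc/hadoop/core-site.xml; e.g., with a resolved
            # runtime.hostname of "master" (hostname purely illustrative), the
            # first entry becomes:
            #
            #   <property>
            #     <name>fs.default.name</name>
            #     <value>hdfs://master:45010/</value>
            #   </property>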
            # hdfs-site.xml entries
            hdfs {
                # storage folders for namenode metadata, datanode blocks, and
                # checkpoints, spread over four node-local disks
                # (comma-separated lists)
                dfs.namenode.name.dir = "/data/1/peel/hadoop-2/name,/data/2/peel/hadoop-2/name,/data/3/peel/hadoop-2/name,/data/4/peel/hadoop-2/name"
                dfs.datanode.data.dir = "/data/1/peel/hadoop-2/data,/data/2/peel/hadoop-2/data,/data/3/peel/hadoop-2/data,/data/4/peel/hadoop-2/data"
                dfs.namenode.checkpoint.dir = "/data/1/peel/hadoop-2/check,/data/2/peel/hadoop-2/check,/data/3/peel/hadoop-2/check,/data/4/peel/hadoop-2/check"
                dfs.replication = 3
                dfs.permissions.enabled = true
                # HDFS block size in bytes (128 MiB)
                dfs.blocksize = 134217728
                # namenode
                # (note: this HTTP port coincides with the NameNode RPC port in
                # fs.default.name above; adjust one of the two before deploying)
                dfs.namenode.http-address = "0.0.0.0:45010"
                dfs.namenode.https-address = "0.0.0.0:45011"
                dfs.namenode.secondary.http-address = "0.0.0.0:45012"
                dfs.namenode.secondary.https-address = "0.0.0.0:45013"
                dfs.namenode.backup.address = "0.0.0.0:45014"
                dfs.namenode.backup.http-address = "0.0.0.0:45015"
                # leave safe mode once 90% of blocks have reported, after an
                # extension of 10000 ms
                dfs.namenode.safemode.threshold-pct = "0.9f"
                dfs.namenode.safemode.extension = 10000
                dfs.namenode.handler.count = 100
                # datanode
                dfs.datanode.address = "0.0.0.0:45020"
                dfs.datanode.http.address = "0.0.0.0:45021"
                dfs.datanode.ipc.address = "0.0.0.0:45022"
                # balancer bandwidth cap per datanode, in bytes per second
                dfs.datanode.balance.bandwidthPerSec = 10000000000
                # journalnode
                dfs.journalnode.rpc-address = "0.0.0.0:45030"
                dfs.journalnode.http-address = "0.0.0.0:45031"
                dfs.journalnode.https-address = "0.0.0.0:45032"
            }
            # enable this block to run Hadoop with native libraries;
            # use it only with a Hadoop build that ships native libraries
            # for your environment!
            # env {
            #     HADOOP_COMMON_LIB_NATIVE_DIR = "$HADOOP_INSTALL/lib/native"
            #     HADOOP_OPTS = "-Djava.library.path="${system.hadoop-2.path.home}"/lib/native"
            # }
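            # before enabling the block above, you can check whether your
            # Hadoop build actually ships working native libraries with:
            #
            #   bin/hadoop checknative -a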
        }
    }
}
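# A typical way to exercise this configuration from a Peel bundle (a sketch;
# the command names assume Peel's standard CLI and bundle layout):
#
#   ./peel.sh sys:setup hadoop-2      # materialize the configs and start HDFS
#   ./peel.sh sys:teardown hadoop-2   # stop HDFS again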