Hadoop core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ochadoopcluster</value>
    <description>The name of the default file system. A URI whose scheme
      and authority determine the FileSystem implementation. The uri's
      scheme determines the config property (fs.SCHEME.impl) naming the
      FileSystem implementation class. The uri's authority is used to
      determine the host, port, etc. for a filesystem.</description>
  </property>

  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/ochadoop/tmp/hadoop/hadoop-${user.name}</value>
    <description>A base for other temporary directories.</description>
  </property>

  <property>
    <name>ipc.server.listen.queue.size</name>
    <value>32768</value>
    <description>Indicates the length of the listen queue for servers
      accepting client connections.</description>
  </property>

  <property>
    <name>io.native.lib.available</name>
    <value>true</value>
    <description>Should native hadoop libraries, if present, be used.</description>
  </property>

  <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
    <description>A comma-separated list of the compression codec classes
      that can be used for compression/decompression. In addition to any
      classes specified with this property (which take precedence), codec
      classes on the classpath are discovered using a Java ServiceLoader.</description>
  </property>

  <property>
    <description>The user name to filter as, on static web filters while
      rendering content. An example use is the HDFS web UI (user to be
      used for browsing files).</description>
    <name>hadoop.http.staticuser.user</name>
    <value>ochadoop</value>
  </property>

  <property>
    <name>fs.trash.interval</name>
    <value>1440</value>
    <description>Number of minutes after which the checkpoint gets deleted.
      If zero, the trash feature is disabled. This option may be configured
      both on the server and the client. If trash is disabled server side
      then the client side configuration is checked. If trash is enabled on
      the server side then the value configured on the server is used and
      the client configuration value is ignored.</description>
  </property>

  <property>
    <name>fs.trash.checkpoint.interval</name>
    <value>30</value>
    <description>Number of minutes between trash checkpoints. Should be
      smaller or equal to fs.trash.interval. If zero, the value is set to
      the value of fs.trash.interval. Every time the checkpointer runs it
      creates a new checkpoint out of current and removes checkpoints
      created more than fs.trash.interval minutes ago.</description>
  </property>

  <property>
    <name>io.compression.codec.lzo.class</name>
    <value>com.hadoop.compression.lzo.LzopCodec</value>
  </property>

  <property>
    <name>net.topology.script.file.name</name>
    <value>/home/ochadoop/app/hadoop/etc/hadoop/rack.py</value>
    <description>The script name that should be invoked to resolve DNS
      names to NetworkTopology names. Example: the script would take
      host.foo.bar as an argument, and return /rack1 as the output.</description>
  </property>

  <property>
    <name>net.topology.script.number.args</name>
    <value>1</value>
    <description>The max number of args that the script configured with
      net.topology.script.file.name should be run with. Each arg is an
      IP address.</description>
  </property>

  <property>
    <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
    <value>60000</value>
    <description>Timeout that the CLI (manual) FC waits for monitorHealth,
      getServiceState.</description>
  </property>

  <property>
    <name>ipc.client.connect.timeout</name>
    <value>60000</value>
    <description>Indicates the number of milliseconds a client will wait
      for the socket to establish a server connection.</description>
  </property>

</configuration>
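
The net.topology.script.file.name property above points at a rack-awareness script, rack.py. Hadoop invokes that script with up to net.topology.script.number.args host names or IP addresses as arguments (1 here) and reads one rack path per argument from its standard output. The actual contents of /home/ochadoop/app/hadoop/etc/hadoop/rack.py are not shown in this post, so the following is only a minimal sketch; the IP-to-rack table in it is a hypothetical placeholder.

    #!/usr/bin/env python
    # Minimal sketch of a topology script like the rack.py referenced by
    # net.topology.script.file.name. The IP-to-rack mapping below is a
    # hypothetical placeholder; a real script would use the site's own mapping.
    import sys

    RACK_MAP = {
        "192.168.1.11": "/rack1",   # example datanode -> rack assignments
        "192.168.1.12": "/rack1",
        "192.168.1.21": "/rack2",
    }
    DEFAULT_RACK = "/default-rack"  # rack reported for unknown hosts

    # Hadoop passes up to net.topology.script.number.args arguments per call
    # and expects one rack path per argument on stdout, in the same order.
    for host in sys.argv[1:]:
        print(RACK_MAP.get(host, DEFAULT_RACK))

The script must be executable (chmod +x rack.py), since the NameNode runs it directly to resolve each datanode's rack.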