Configuring Java Access to the Hadoop Distributed File System (HDFS)

Configuration Files

In the configuration files below, replace m103 with the address of your HDFS service.

To access files on HDFS from a Java client, the one file that cannot be ignored is hadoop-0.20.2/conf/core-site.xml. This is where I initially tripped up: with it misconfigured I could not connect to HDFS at all, and files could be neither created nor read.

core-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
 <!-- global properties -->
 <property>
  <name>hadoop.tmp.dir</name>
  <value>/home/zhangzk/hadoop</value>
  <description>A base for other temporary directories.</description>
 </property>
 <!-- file system properties -->
 <property>
  <name>fs.default.name</name>
  <value>hdfs://linux-zzk-113:9000</value>
 </property>
</configuration>

The hadoop.tmp.dir property is the directory where the namenode stores its metadata; on a datanode it is the directory where that node stores file (block) data.

The fs.default.name property gives the namenode's address and port; the default is file:///. A Java API client must connect to HDFS with exactly the URL configured here, and datanodes likewise use this URL to reach the namenode.
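
To make this concrete, here is a minimal sketch of a client connecting with the URL configured in fs.default.name. The hostname and port come from the core-site.xml above; the class name HdfsConnect is just an illustration.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HdfsConnect {

 public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  // Must match fs.default.name in core-site.xml; a mismatch is the
  // classic reason the client cannot reach the namenode.
  FileSystem fs = FileSystem.get(URI.create("hdfs://linux-zzk-113:9000"), conf);
  System.out.println("Connected. Working directory: " + fs.getWorkingDirectory());
  fs.close();
 }
}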

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera Manager-->
<configuration>
 <property>
  <name>dfs.namenode.name.dir</name>
  <value>file:///mnt/sdc1/dfs/nn</value>
 </property>
 <property>
  <name>dfs.namenode.servicerpc-address</name>
  <value>m103:8022</value>
 </property>
 <property>
  <name>dfs.https.address</name>
  <value>m103:50470</value>
 </property>
 <property>
  <name>dfs.https.port</name>
  <value>50470</value>
 </property>
 <property>
  <name>dfs.namenode.http-address</name>
  <value>m103:50070</value>
 </property>
 <property>
  <name>dfs.replication</name>
  <value>3</value>
 </property>
 <property>
  <name>dfs.blocksize</name>
  <value>134217728</value>
 </property>
 <property>
  <name>dfs.client.use.datanode.hostname</name>
  <value>false</value>
 </property>
 <property>
  <name>fs.permissions.umask-mode</name>
  <value>022</value>
 </property>
 <property>
  <name>dfs.namenode.acls.enabled</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.block.local-path-access.user</name>
  <value>cloudera-scm</value>
 </property>
 <property>
  <name>dfs.client.read.shortcircuit</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.domain.socket.path</name>
  <value>/var/run/hdfs-sockets/dn</value>
 </property>
 <property>
  <name>dfs.client.read.shortcircuit.skip.checksum</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.client.domain.socket.data.traffic</name>
  <value>false</value>
 </property>
 <property>
  <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
  <value>true</value>
 </property>
 <property>
  <name>fs.http.impl</name>
  <value>com.scistor.datavision.fs.HTTPFileSystem</value>
 </property>
</configuration>
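
A client program does not have to hard-code every setting: if copies of the cluster's configuration files are available on the client machine, they can be loaded into the Configuration directly. The sketch below assumes the files live under /etc/hadoop/conf; point the paths at wherever your core-site.xml and hdfs-site.xml actually reside.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsFromConfigFiles {

 public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  // Assumed locations of local copies of the cluster configs.
  conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
  conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
  // fs.default.name and the dfs.* settings above now come from those files.
  FileSystem fs = FileSystem.get(conf);
  System.out.println("Default filesystem: " + conf.get("fs.default.name"));
  fs.close();
 }
}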

mapred-site.xml

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera Manager-->
<configuration>
 <property>
  <name>mapreduce.job.split.metainfo.maxsize</name>
  <value>10000000</value>
 </property>
 <property>
  <name>mapreduce.job.counters.max</name>
  <value>120</value>
 </property>
 <property>
  <name>mapreduce.output.fileoutputformat.compress</name>
  <value>true</value>
 </property>
 <property>
  <name>mapreduce.output.fileoutputformat.compress.type</name>
  <value>BLOCK</value>
 </property>
 <property>
  <name>mapreduce.output.fileoutputformat.compress.codec</name>
  <value>org.apache.hadoop.io.compress.SnappyCodec</value>
 </property>
 <property>
  <name>mapreduce.map.output.compress.codec</name>
  <value>org.apache.hadoop.io.compress.SnappyCodec</value>
 </property>
 <property>
  <name>mapreduce.map.output.compress</name>
  <value>true</value>
 </property>
 <property>
  <name>zlib.compress.level</name>
  <value>DEFAULT_COMPRESSION</value>
 </property>
 <property>
  <name>mapreduce.task.io.sort.factor</name>
  <value>64</value>
 </property>
 <property>
  <name>mapreduce.map.sort.spill.percent</name>
  <value>0.8</value>
 </property>
 <property>
  <name>mapreduce.reduce.shuffle.parallelcopies</name>
  <value>10</value>
 </property>
 <property>
  <name>mapreduce.task.timeout</name>
  <value>600000</value>
 </property>
 <property>
  <name>mapreduce.client.submit.file.replication</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.job.reduces</name>
  <value>24</value>
 </property>
 <property>
  <name>mapreduce.task.io.sort.mb</name>
  <value>256</value>
 </property>
 <property>
  <name>mapreduce.map.speculative</name>
  <value>false</value>
 </property>
 <property>
  <name>mapreduce.reduce.speculative</name>
  <value>false</value>
 </property>
 <property>
  <name>mapreduce.job.reduce.slowstart.completedmaps</name>
  <value>0.8</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.address</name>
  <value>m103:10020</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>m103:19888</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.webapp.https.address</name>
  <value>m103:19890</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.admin.address</name>
  <value>m103:10033</value>
 </property>
 <property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.staging-dir</name>
  <value>/user</value>
 </property>
 <property>
  <name>mapreduce.am.max-attempts</name>
  <value>2</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.resource.mb</name>
  <value>2048</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.job.ubertask.enable</name>
  <value>false</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.command-opts</name>
  <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
 </property>
 <property>
  <name>mapreduce.map.java.opts</name>
  <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
 </property>
 <property>
  <name>mapreduce.reduce.java.opts</name>
  <value>-Djava.net.preferIPv4Stack=true -Xmx2576980378</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.admin.user.env</name>
  <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
 </property>
 <property>
  <name>mapreduce.map.memory.mb</name>
  <value>2048</value>
 </property>
 <property>
  <name>mapreduce.map.cpu.vcores</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.reduce.memory.mb</name>
  <value>3072</value>
 </property>
 <property>
  <name>mapreduce.reduce.cpu.vcores</name>
  <value>1</value>
 </property>
 <property>
  <name>mapreduce.application.classpath</name>
  <value>$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH,$CDH_HCAT_HOME/share/hcatalog/*,$CDH_HIVE_HOME/lib/*,/etc/hive/conf,/opt/cloudera/parcels/CDH/lib/udps/*</value>
 </property>
 <property>
  <name>mapreduce.admin.user.env</name>
  <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
 </property>
 <property>
  <name>mapreduce.shuffle.max.connections</name>
  <value>80</value>
 </property>
</configuration>

Accessing Files and Directories on HDFS via the Java API

package com.demo.hdfs;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

/**
 * @author zhangzk
 * 
 */
public class FileCopyToHdfs {

 public static void main(String[] args) {
  try {
   //uploadToHdfs();
   //deleteFromHdfs();
   //getDirectoryFromHdfs();
   appendToHdfs();
   readFromHdfs();
   System.out.println("SUCCESS");
  } catch (Exception e) {
   e.printStackTrace();
  }
 }

 /** Upload a local file to HDFS. */
 private static void uploadToHdfs() throws FileNotFoundException, IOException {
  String localSrc = "d:/qq.txt";
  String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
  InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
  Configuration conf = new Configuration();

  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  // Print a dot for each progress callback during the upload.
  OutputStream out = fs.create(new Path(dst), new Progressable() {
   public void progress() {
    System.out.print(".");
   }
  });
  // Copy in 4 KB chunks; the final 'true' closes both streams when done.
  IOUtils.copyBytes(in, out, 4096, true);
 }





 /** Read a file from HDFS and save it to the local disk. */
 private static void readFromHdfs() throws FileNotFoundException, IOException {
  String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  FSDataInputStream hdfsInStream = fs.open(new Path(dst));

  OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
  byte[] ioBuffer = new byte[1024];
  int readLen = hdfsInStream.read(ioBuffer);
  // read() returns -1 at end-of-stream.
  while (-1 != readLen) {
   out.write(ioBuffer, 0, readLen);
   readLen = hdfsInStream.read(ioBuffer);
  }
  out.close();
  hdfsInStream.close();
  fs.close();
 }
 

 /** Append content to the end of a file on HDFS.
  *  Note: appends must be enabled by adding
  *  <property><name>dfs.support.append</name><value>true</value></property>
  *  to hdfs-site.xml. */
 private static void appendToHdfs() throws FileNotFoundException, IOException {
  String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  FSDataOutputStream out = fs.append(new Path(dst));

  // Write the payload once, then close the stream.
  byte[] bytes = "zhangzk add by hdfs java api".getBytes();
  out.write(bytes, 0, bytes.length);
  out.close();
  fs.close();
 }
 

 /** Delete a file from HDFS. */
 private static void deleteFromHdfs() throws FileNotFoundException, IOException {
  String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  // delete(path, recursive) removes the file immediately; deleteOnExit()
  // would only remove it when the FileSystem is closed.
  fs.delete(new Path(dst), false);
  fs.close();
 }
 

 /** List the files and directories under an HDFS path. */
 private static void getDirectoryFromHdfs() throws FileNotFoundException, IOException {
  String dst = "hdfs://192.168.0.113:9000/user/zhangzk";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  FileStatus fileList[] = fs.listStatus(new Path(dst));
  for (int i = 0; i < fileList.length; i++) {
   System.out.println("name:" + fileList[i].getPath().getName() + "\t\tsize:" + fileList[i].getLen());
  }
  fs.close();
 }

}

Note: append support has a checkered history. It is disabled by default on the 0.20.x line (hence the dfs.support.append switch above) and was never considered stable there; a reworked implementation landed in hadoop-0.21 (HDFS-265). For more on the append operation, see the write-up on JavaEye.
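
Because dfs.support.append is read by the namenode, a client cannot simply switch appends on from its own side. A common client-side workaround on versions without working append is to rewrite the file: copy the existing content to a temporary file, add the new bytes, then swap it into place. This is a minimal sketch under that assumption; the helper name and .tmp suffix are illustrative.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class AppendByRewrite {

 /** Append 'extra' to 'dst' by rewriting the whole file. */
 public static void appendByRewrite(String dst, byte[] extra) throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(dst), conf);
  Path path = new Path(dst);
  Path tmp = new Path(dst + ".tmp");

  // Copy the old content into a temporary file, then add the new bytes.
  FSDataInputStream in = fs.open(path);
  FSDataOutputStream out = fs.create(tmp);
  IOUtils.copyBytes(in, out, 4096, false);
  in.close();
  out.write(extra);
  out.close();

  // Replace the original with the rewritten file.
  fs.delete(path, false);
  fs.rename(tmp, path);
  fs.close();
 }

 public static void main(String[] args) throws Exception {
  appendByRewrite("hdfs://192.168.0.113:9000/user/zhangzk/qq.txt",
    "zhangzk add by hdfs java api\n".getBytes());
 }
}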
