这期内容当中小编将会给大家带来有关hadoop中怎么将文件上传到指定datanode，文章内容丰富且以专业的角度为大家分析和叙述，阅读完这篇文章希望大家可以有所收获。
創(chuàng)新互聯(lián)公司專注于沂南網(wǎng)站建設(shè)服務(wù)及定制,我們擁有豐富的企業(yè)做網(wǎng)站經(jīng)驗。 熱誠為您提供沂南營銷型網(wǎng)站建設(shè),沂南網(wǎng)站制作、沂南網(wǎng)頁設(shè)計、沂南網(wǎng)站官網(wǎng)定制、成都微信小程序服務(wù),打造沂南網(wǎng)絡(luò)公司原創(chuàng)品牌,更為您提供沂南網(wǎng)站排名全網(wǎng)營銷落地服務(wù)。
package hgs.dfsclient.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsCreateModes;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.*;

import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;

/**
 * Demonstrates writing a file to specific (favored) datanodes through the
 * low-level {@link DFSClient} API, then listing the block locations of the
 * written file.
 *
 * <p>The "favored nodes" hint is passed to
 * {@code DFSClient.create(..., favoredNodes, ...)}; HDFS will try to place
 * replicas on those nodes but may fall back to others if they are unavailable.
 */
public class MainTest {

    /** Shared RNG used to fill write buffers with random payload bytes. */
    static Random rand = new Random(System.nanoTime());

    public static void main(String[] args) throws IOException, URISyntaxException {
        Configuration conf = new Configuration();
        // Load cluster configuration from local copies of the HDFS config files.
        conf.addResource(new File("D://hdfsconf/core-site.xml").toURI().toURL());
        conf.addResource(new File("D://hdfsconf/hdfs-site.xml").toURI().toURL());
        String url = conf.get("fs.defaultFS");
        System.out.println(url);

        // Favored datanode(s): HDFS will try to place block replicas here.
        // NOTE(review): 50012 looks like a non-default data-transfer port — confirm
        // it matches dfs.datanode.address on the target node.
        InetSocketAddress[] favor = {new InetSocketAddress("192.168.0.191", 50012)};

        // try-with-resources guarantees the client is closed even on failure.
        try (DFSClient client = new DFSClient(new URI("hdfs://192.168.0.191:9000/"), conf)) {
            createAndFillFile(client, conf, favor, "/user/test.txt4");
            listBlockLocation(client, conf, "/user/test.txt4", true);
        }
    }

    /**
     * Creates {@code src} on the cluster with the given favored datanodes and
     * fills it with random bytes.
     *
     * @param client open DFS client used to create the file
     * @param conf   configuration supplying the umask for the file permission
     * @param favor  datanodes HDFS should prefer when placing replicas
     * @param src    absolute HDFS path of the file to create (overwritten if present)
     * @throws IOException if the create or any write fails
     */
    public static void createAndFillFile(DFSClient client, Configuration conf,
                                         InetSocketAddress[] favor, String src) throws IOException {
        DFSOutputStream dfsOutputStream = client.create(src,
                FsCreateModes.applyUMask(FsPermission.getFileDefault(), FsPermission.getUMask(conf)),
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                true,               // createParent
                (short) 1,          // replication: single replica, so the favored node gets it
                32 * 1024 * 1024,   // block size: 32 MB
                null,               // progress
                8192,               // buffer size
                null,               // checksumOpt
                favor,              // favored nodes
                null);              // ecPolicyName
        try {
            byte[] b = new byte[1024 * 3 * 3];
            // ~900 MB of random payload in 9 KB chunks.
            for (int i = 0; i < 100000; i++) {
                fileByte(b);
                dfsOutputStream.write(b);
            }
        } finally {
            // Close in finally so a failed write still releases the lease/stream.
            dfsOutputStream.close();
        }
    }

    /** Fills {@code b} with random bytes; no-op when {@code b} is null. */
    public static void fileByte(byte[] b) {
        if (b != null) {
            rand.nextBytes(b);
        }
    }

    /**
     * Prints the datanode address and transfer port of every replica of every
     * block of {@code src}.
     *
     * <p>NOTE(review): the published copy of this method was corrupted (HTML
     * stripping ate the generic declarations and the loop header); the loop
     * below is the straightforward reconstruction.
     *
     * @param client       open DFS client
     * @param conf         unused here, kept for signature compatibility
     * @param src          HDFS path whose block locations are listed
     * @param needLocation must be true for block locations to be returned
     * @throws IOException if the listing RPC fails
     */
    public static void listBlockLocation(DFSClient client, Configuration conf,
                                         String src, boolean needLocation) throws IOException {
        DirectoryListing directoryListing = client.listPaths(src, HdfsFileStatus.EMPTY_NAME, needLocation);
        HdfsFileStatus[] fileStatuses = directoryListing.getPartialListing();
        for (int i = 0; i < fileStatuses.length; i++) {
            // With needLocation=true the entries carry their block locations.
            HdfsLocatedFileStatus located = (HdfsLocatedFileStatus) fileStatuses[i];
            LocatedBlocks locatedBlocks = located.getLocatedBlocks();
            List<LocatedBlock> locatedBlocks1 = locatedBlocks.getLocatedBlocks();
            for (LocatedBlock lb : locatedBlocks1) {
                DatanodeInfo[] locations = lb.getLocations();
                for (DatanodeInfo li : locations) {
                    // Fix: print the data-transfer port, not the DatanodeInfo toString.
                    System.out.println("addr:" + li.getIpAddr() + "--port:" + li.getXferPort());
                }
            }
        }
    }
}
上述就是小编为大家分享的hadoop中怎么将文件上传到指定datanode了，如果刚好有类似的疑惑，不妨参照上述分析进行理解。如果想知道更多相关知识，欢迎关注创新互联行业资讯频道。