這篇文章主要介紹了hadoop中如何實(shí)現(xiàn)DBInputFormat,具有一定借鑒價(jià)值,感興趣的朋友可以參考下,希望大家閱讀完這篇文章之后大有收獲,下面讓小編帶著大家一起了解一下。
成都創(chuàng)新互聯(lián)成立與2013年,是專(zhuān)業(yè)互聯(lián)網(wǎng)技術(shù)服務(wù)公司,擁有項(xiàng)目網(wǎng)站設(shè)計(jì)制作、成都網(wǎng)站制作網(wǎng)站策劃,項(xiàng)目實(shí)施與項(xiàng)目整合能力。我們以讓每一個(gè)夢(mèng)想脫穎而出為使命,1280元瓊山做網(wǎng)站,已為上家服務(wù),為瓊山各地企業(yè)和個(gè)人服務(wù),聯(lián)系電話:18980820575
代碼未做測(cè)試,先做記錄
package com.test;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URI;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Map-only MapReduce job that reads rows from a MySQL table ("myuser") via
 * {@link DBInputFormat} and writes them to HDFS as tab-separated text.
 *
 * To run this example:
 * 1. Put the MySQL JDBC driver jar into the taskTracker's lib directory and
 *    restart the cluster.
 *
 * Usage: hadoop jar ... WordCountDB &lt;ignored&gt; &lt;hdfs-output-path&gt;
 */
public class WordCountDB extends Configured implements Tool {

    /** Default HDFS output location (kept for reference; the job writes to args[1]). */
    private String OUT_PATH = "hdfs://grid131:9000/output";

    /**
     * Identity-style mapper: key is the record offset supplied by
     * DBInputFormat, value is the row mapped into a MyUser bean.
     * Emits (offset, "id\tname").
     */
    public static class Map extends Mapper<LongWritable, MyUser, LongWritable, Text> {
        @Override
        public void map(LongWritable key, MyUser value, Context context)
                throws IOException, InterruptedException {
            context.write(key, new Text(value.toString()));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = this.getConf();
        // JDBC connection settings consumed by DBInputFormat.
        DBConfiguration.configureDB(conf, "com.mysql.jdbc.Driver",
                "jdbc:mysql://grid131:3306/test", "root", "admin");

        // The job writes to args[1]; delete THAT path if it exists, otherwise
        // a rerun fails with "output directory already exists".
        // (The original code deleted OUT_PATH here, which was never used as
        // the job's output — an inconsistency bug.)
        Path outputPath = new Path(args[1]);
        FileSystem fs = FileSystem.get(new URI(args[1]), conf);
        fs.delete(outputPath, true);

        Job job = Job.getInstance(conf, WordCountDB.class.getSimpleName());
        job.setJarByClass(WordCountDB.class);
        FileOutputFormat.setOutputPath(job, outputPath);

        // No reduce phase: map output goes straight to HDFS.
        job.setNumReduceTasks(0);
        job.setInputFormatClass(DBInputFormat.class);

        // Table, optional WHERE conditions, ORDER BY, and the selected fields:
        // DBInputFormat.setInput(job, inputClass, tableName, conditions, orderBy, fieldNames...)
        DBInputFormat.setInput(job, MyUser.class, "myuser", null, null, "id", "name");

        job.setMapperClass(Map.class);
        // With zero reducers the map output types define the job output types.
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.waitForCompletion(true);
        return job.isSuccessful() ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // BUG FIX: the original instantiated "new WordCount()" (a different
        // class); this tool must run itself.
        int exit = ToolRunner.run(new WordCountDB(), args);
        System.exit(exit);
    }
}

/**
 * Row bean for the "myuser" table. Implements both Writable (Hadoop
 * serialization between tasks) and DBWritable (JDBC column mapping).
 * Field order must match the fieldNames passed to DBInputFormat.setInput:
 * column 1 = id (BIGINT), column 2 = name (VARCHAR).
 */
class MyUser implements Writable, DBWritable {

    private Long id;
    private String name;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    /** Hadoop serialization: long id followed by a UTF string. */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(this.id);
        Text.writeString(out, this.name);
    }

    /** Hadoop deserialization; must mirror {@link #write(DataOutput)} exactly. */
    @Override
    public void readFields(DataInput in) throws IOException {
        this.id = in.readLong();
        this.name = Text.readString(in);
    }

    /** JDBC write (used by DBOutputFormat); parameter indexes are 1-based. */
    @Override
    public void write(PreparedStatement statement) throws SQLException {
        statement.setLong(1, this.id);
        statement.setString(2, this.name);
    }

    /** JDBC read: column order matches ("id", "name") from setInput. */
    @Override
    public void readFields(ResultSet resultSet) throws SQLException {
        this.id = resultSet.getLong(1);
        this.name = resultSet.getString(2);
    }

    /** Tab-separated form written to the output file by the mapper. */
    @Override
    public String toString() {
        return this.id + "\t" + this.name;
    }
}
感謝你能夠認(rèn)真閱讀完這篇文章,希望小編分享的“hadoop中如何實(shí)現(xiàn)DBInputFormat”這篇文章對(duì)大家有幫助,同時(shí)也希望大家多多支持創(chuàng)新互聯(lián),關(guān)注創(chuàng)新互聯(lián)行業(yè)資訊頻道,更多相關(guān)知識(shí)等著你來(lái)學(xué)習(xí)!