Besides the shell for managing data interactively, HBase provides a Java API for operating on table data. Every cell value is stored as raw bytes, so working with the API means constantly converting between byte[] and Java types.
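Since every value crosses the wire as byte[], HBase ships the org.apache.hadoop.hbase.util.Bytes helper for these conversions (the snippets below use plain String.getBytes(), which only works for strings). A minimal sketch; the values are illustrative:
byte[] rowKey = Bytes.toBytes("1");    // String -> byte[]
byte[] ageBytes = Bytes.toBytes(25L);  // long -> byte[]
long age = Bytes.toLong(ageBytes);     // byte[] -> long
String key = Bytes.toString(rowKey);   // byte[] -> String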
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;        // HBaseConfiguration, HTableDescriptor, HColumnDescriptor, TableName
import org.apache.hadoop.hbase.client.*; // HConnection, HTableInterface, Get, Put, Delete, Scan, ...

// 1. Prepare the HBase context (HBaseAdmin for admin operations) and the "user" table handle hTable
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.rootdir", "hdfs://rdc:9000/HRoot");
conf.set("hbase.zookeeper.quorum", "p1");
HConnection conn = HConnectionManager.createConnection(conf);
HTableInterface hTable = conn.getTable("user");
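The step-1 comment names HBaseAdmin, which handles DDL-style operations, but the snippet never constructs one. A hedged sketch of creating the user table with it, assuming the same pre-1.0 client API used throughout this post:
HBaseAdmin admin = new HBaseAdmin(conf);
if (!admin.tableExists("user")) {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("user"));
    desc.addFamily(new HColumnDescriptor("info")); // the column family used below
    admin.createTable(desc);
}
admin.close();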
// 2. Read -> Get. HBase can only do random reads by row key; there is no secondary index by default. This is equivalent to SELECT u.name FROM user u WHERE u.id = 1; a query on a non-key column, such as SELECT u.name FROM user u WHERE u.name = 'rdc', is not possible.
String id = "1"; // row key
String columnFamily = "info"; // column family
String column = "name"; // column qualifier
Get get = new Get(id.getBytes());
Result result = hTable.get(get);
byte[] valueBytes = result.getValue(columnFamily.getBytes(), column.getBytes());
String value = new String(valueBytes); // the result of SELECT u.name FROM user u WHERE u.id = 1
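A Get with only a row key returns every column of the row. When one column is enough, addColumn narrows what the server sends back; a sketch reusing the variables above:
Get oneColumn = new Get(id.getBytes());
oneColumn.addColumn(columnFamily.getBytes(), column.getBytes()); // fetch only info:name
Result r = hTable.get(oneColumn);
if (!r.isEmpty()) { // the row may not exist
    String name = new String(r.getValue(columnFamily.getBytes(), column.getBytes()));
}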
// 3. Insert -> Put: cells are added one column at a time. SQL: INSERT INTO user(name) VALUES('mosi')
// 4. Update -> also Put, overwriting one column at a time. SQL: UPDATE user SET name = 'mosi' WHERE id = 1
Put put = new Put(id.getBytes());
put.add(columnFamily.getBytes(), column.getBytes(), "mosi".getBytes());
hTable.put(put); // like a java.util.Map: adds the cell if absent, overwrites it if present
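put also accepts a List, so many rows go out in one client call instead of one RPC per row. A minimal sketch; the row keys and values are made up, and java.util.List/ArrayList must be imported:
List<Put> puts = new ArrayList<Put>();
for (int i = 0; i < 100; i++) {
    Put p = new Put(String.valueOf(i).getBytes());
    p.add(columnFamily.getBytes(), column.getBytes(), ("mosi" + i).getBytes());
    puts.add(p);
}
hTable.put(puts); // the whole batch is sent together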
// 5. Delete -> Delete. SQL: DELETE FROM user WHERE id = 1
Delete delete = new Delete(id.getBytes());
hTable.delete(delete);
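A Delete built from just a row key removes the whole row. The pre-1.0 client can also drop a single column, roughly SQL's UPDATE user SET name = NULL WHERE id = 1; a sketch (in 1.0+ these methods were renamed addColumn/addColumns):
Delete oneCell = new Delete(id.getBytes());
oneCell.deleteColumns(columnFamily.getBytes(), column.getBytes()); // all versions of info:name
hTable.delete(oneCell);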
// 6. Scan. HBase also provides scanning over a row-key range; with no range given, it is a full table scan.
// Each round trip fetches a batch of rows that is cached in client memory, so compared with Get it saves network round trips.
// SQL: SELECT * FROM user u
Scan scan = new Scan();
ResultScanner scanner = hTable.getScanner(scan);
for (Result result : scanner) {
    byte[] value = result.getValue(columnFamily.getBytes(), column.getBytes());
}
scanner.close(); // release the scanner's server-side resources
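The row-key range and the client-side batch mentioned above are configured on the Scan itself. A sketch of a bounded scan, again assuming the pre-1.0 API; when the table is no longer needed, hTable.close() and conn.close() release the connection:
Scan rangeScan = new Scan();
rangeScan.setStartRow("1".getBytes()); // inclusive
rangeScan.setStopRow("5".getBytes());  // exclusive
rangeScan.setCaching(100);             // rows fetched per round trip and cached on the client
ResultScanner rs = hTable.getScanner(rangeScan);
try {
    for (Result r : rs) {
        String name = new String(r.getValue(columnFamily.getBytes(), column.getBytes()));
    }
} finally {
    rs.close(); // always release the scanner
}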