本文以HBase 0.90.2为例,介绍如何在Windows系统,Eclipse IDE集成环境下,使用Java语言,进行HBase客户端编程,包含建立表、删除表、插入记录、删除记录、各种方式下的查询操作等。
1. 准备工作
1、下载后安装jdk包(这里使用的是jdk-6u10-rc2-bin-b32-windows-i586-p-12_sep_2008);
2、下载eclipse,解压到本地(这里使用的是eclipse-java-helios-SR2-win32);
3、下载HBase包,解压安装包到本地(这里使用的是hbase-0.90.2)。
2. 搭建开发环境
1、运行Eclipse,创建一个新的Java工程“HBaseClient”,右键项目根目录,选择“Properties”->“Java Build Path”->“Library”->“Add External JARs”,将HBase解压后根目录下的hbase-0.90.2.jar、hbase-0.90.2-tests.jar和lib子目录下所有jar包添加到本工程的Classpath下。
2、按照步骤1中的操作,将自己所连接的HBase的配置文件hbase-site.xml添加到本工程的Classpath中,如下所示为配置文件的一个示例。
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://hostname:9000/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>*.*.*.*, *.*.*.*, *.*.*.*</value>
  </property>
  <property skipInDoc="true">
    <name>hbase.defaults.for.version</name>
    <value>0.90.2</value>
  </property>
</configuration>
3、下面可以在Eclipse环境下进行HBase编程了。
3. HBase基本操作代码示例
完整示例代码如下:
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * HBase (0.90.x) client examples: create/delete a table, insert a row,
 * delete a row, get a single row, and scan a whole table.
 *
 * <p>Connection settings are read from an {@code hbase-site.xml} on the
 * classpath via {@link HBaseConfiguration#create()}.
 */
public class CreateTable {

    /** Shared client configuration, loaded once from the classpath. */
    private static Configuration conf = null;

    static {
        conf = HBaseConfiguration.create();
    }

    /**
     * 1. Creates a table with the given column families. Does nothing
     * (beyond printing a message) if the table already exists.
     *
     * @param tablename name of the table to create
     * @param cfs       column family names
     * @throws IOException if the master cannot be reached or creation fails
     */
    public static void createTable(String tablename, String[] cfs) throws IOException {
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tablename)) {
            System.out.println("表已经存在!");
        } else {
            HTableDescriptor tableDesc = new HTableDescriptor(tablename);
            for (int i = 0; i < cfs.length; i++) {
                // Column families are fixed once the table is created; column
                // qualifiers, by contrast, can be added at write time.
                tableDesc.addFamily(new HColumnDescriptor(cfs[i]));
            }
            admin.createTable(tableDesc);
            System.out.println("表创建成功!");
        }
    }

    /**
     * 2. Inserts one row ("rows1") with two cells per column family.
     *
     * @param tablename target table
     * @param cfs       column families to write into
     */
    public static void writeRow(String tablename, String[] cfs) {
        HTable table = null;
        try {
            table = new HTable(conf, tablename);
            Put put = new Put(Bytes.toBytes("rows1"));
            for (int j = 0; j < cfs.length; j++) {
                // add(family, qualifier, value)
                put.add(Bytes.toBytes(cfs[j]), Bytes.toBytes("列1"), Bytes.toBytes("value_13"));
                put.add(Bytes.toBytes(cfs[j]), Bytes.toBytes("lie2"), Bytes.toBytes("value_24"));
            }
            // Bug fix: the original issued table.put(put) inside the loop,
            // re-sending the accumulating Put once per column family.
            table.put(put);
            System.out.println("插入数据成功");
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            closeQuietly(table);
        }
    }

    /**
     * 3. Deletes a single row by key.
     *
     * @param tablename target table
     * @param rowkey    key of the row to delete
     * @throws IOException on communication failure
     */
    public static void deleteRow(String tablename, String rowkey) throws IOException {
        HTable table = new HTable(conf, tablename);
        try {
            // Typed list instead of the original raw List/ArrayList.
            List<Delete> deletes = new ArrayList<Delete>();
            deletes.add(new Delete(Bytes.toBytes(rowkey)));
            table.delete(deletes);
            System.out.println("删除行成功!");
        } finally {
            table.close();
        }
    }

    /**
     * 4. Fetches a single row and prints every cell in it.
     *
     * @param tablename target table
     * @param rowKey    key of the row to fetch
     * @throws IOException on communication failure
     */
    public static void selectRow(String tablename, String rowKey) throws IOException {
        HTable table = new HTable(conf, tablename);
        try {
            Get g = new Get(Bytes.toBytes(rowKey));
            Result rs = table.get(g);
            for (KeyValue kv : rs.raw()) {
                printKeyValue(kv);
            }
        } finally {
            table.close();
        }
    }

    /**
     * 5. Scans the whole table and prints every cell of every row.
     *
     * @param tablename target table
     */
    public static void scanerTable(String tablename) {
        HTable table = null;
        ResultScanner rs = null;
        try {
            table = new HTable(conf, tablename);
            rs = table.getScanner(new Scan());
            for (Result r : rs) {
                KeyValue[] kvs = r.raw();
                for (int i = 0; i < kvs.length; i++) {
                    printKeyValue(kvs[i]);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // The scanner holds a server-side lease; always release it.
            if (rs != null) {
                rs.close();
            }
            closeQuietly(table);
        }
    }

    /**
     * 6. Disables and then drops a table.
     *
     * @param tablename table to drop
     * @throws IOException if disabling or dropping fails
     */
    public static void deleteTable(String tablename) throws IOException {
        try {
            HBaseAdmin admin = new HBaseAdmin(conf);
            admin.disableTable(tablename);
            admin.deleteTable(tablename);
            System.out.println("表删除成功!");
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        }
    }

    /** Prints one cell as: {@code row family:qualifier timestamp value}. */
    private static void printKeyValue(KeyValue kv) {
        System.out.print(Bytes.toString(kv.getRow()) + " ");      // row key
        System.out.print(Bytes.toString(kv.getFamily()) + ":");   // column family
        System.out.print(Bytes.toString(kv.getQualifier()) + " "); // qualifier
        System.out.print(kv.getTimestamp() + " ");                 // timestamp
        System.out.println(Bytes.toString(kv.getValue()));         // cell value
    }

    /** Closes a table, swallowing only the close-time IOException. */
    private static void closeQuietly(HTable table) {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) throws IOException {
        String[] cfs = "a,b,c".split(",");
        // createTable("test01", cfs);
        // writeRow("test01", cfs);
        // deleteRow("test01", "rows1");
        // selectRow("test01", "rows2");
        // scanerTable("test01");
        // deleteTable("test01");
    }
}
import
java.io.IOException;
import
java.util.ArrayList;
import
java.util.List;
import
org.apache.hadoop.conf.Configuration;
import
org.apache.hadoop.hbase.HBaseConfiguration;
import
org.apache.hadoop.hbase.HColumnDescriptor;
import
org.apache.hadoop.hbase.HTableDescriptor;
import
org.apache.hadoop.hbase.KeyValue;
import
org.apache.hadoop.hbase.MasterNotRunningException;
import
org.apache.hadoop.hbase.ZooKeeperConnectionException;
import
org.apache.hadoop.hbase.client.Delete;
import
org.apache.hadoop.hbase.client.Get;
import
org.apache.hadoop.hbase.client.HBaseAdmin;
import
org.apache.hadoop.hbase.client.HTable;
import
org.apache.hadoop.hbase.client.Put;
import
org.apache.hadoop.hbase.client.Result;
import
org.apache.hadoop.hbase.client.ResultScanner;
import
org.apache.hadoop.hbase.client.Scan;
import
org.apache.hadoop.hbase.util.Bytes;
/**
 * HBase (0.90.x) client examples: create/delete a table, insert a row,
 * delete a row, get a single row, and scan a whole table.
 *
 * <p>Connection settings are read from an {@code hbase-site.xml} on the
 * classpath via {@link HBaseConfiguration#create()}.
 */
public class CreateTable {

    /** Shared client configuration, loaded once from the classpath. */
    private static Configuration conf = null;

    static {
        conf = HBaseConfiguration.create();
    }

    /**
     * 1. Creates a table with the given column families. Does nothing
     * (beyond printing a message) if the table already exists.
     *
     * @param tablename name of the table to create
     * @param cfs       column family names
     * @throws IOException if the master cannot be reached or creation fails
     */
    public static void createTable(String tablename, String[] cfs) throws IOException {
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tablename)) {
            System.out.println("表已经存在!");
        } else {
            HTableDescriptor tableDesc = new HTableDescriptor(tablename);
            for (int i = 0; i < cfs.length; i++) {
                // Column families are fixed once the table is created; column
                // qualifiers, by contrast, can be added at write time.
                tableDesc.addFamily(new HColumnDescriptor(cfs[i]));
            }
            admin.createTable(tableDesc);
            System.out.println("表创建成功!");
        }
    }

    /**
     * 2. Inserts one row ("rows1") with two cells per column family.
     *
     * @param tablename target table
     * @param cfs       column families to write into
     */
    public static void writeRow(String tablename, String[] cfs) {
        HTable table = null;
        try {
            table = new HTable(conf, tablename);
            Put put = new Put(Bytes.toBytes("rows1"));
            for (int j = 0; j < cfs.length; j++) {
                // add(family, qualifier, value)
                put.add(Bytes.toBytes(cfs[j]), Bytes.toBytes("列1"), Bytes.toBytes("value_13"));
                put.add(Bytes.toBytes(cfs[j]), Bytes.toBytes("lie2"), Bytes.toBytes("value_24"));
            }
            // Bug fix: the original issued table.put(put) inside the loop,
            // re-sending the accumulating Put once per column family.
            table.put(put);
            System.out.println("插入数据成功");
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            closeQuietly(table);
        }
    }

    /**
     * 3. Deletes a single row by key.
     *
     * @param tablename target table
     * @param rowkey    key of the row to delete
     * @throws IOException on communication failure
     */
    public static void deleteRow(String tablename, String rowkey) throws IOException {
        HTable table = new HTable(conf, tablename);
        try {
            // Typed list instead of the original raw List/ArrayList.
            List<Delete> deletes = new ArrayList<Delete>();
            deletes.add(new Delete(Bytes.toBytes(rowkey)));
            table.delete(deletes);
            System.out.println("删除行成功!");
        } finally {
            table.close();
        }
    }

    /**
     * 4. Fetches a single row and prints every cell in it.
     *
     * @param tablename target table
     * @param rowKey    key of the row to fetch
     * @throws IOException on communication failure
     */
    public static void selectRow(String tablename, String rowKey) throws IOException {
        HTable table = new HTable(conf, tablename);
        try {
            Get g = new Get(Bytes.toBytes(rowKey));
            Result rs = table.get(g);
            for (KeyValue kv : rs.raw()) {
                printKeyValue(kv);
            }
        } finally {
            table.close();
        }
    }

    /**
     * 5. Scans the whole table and prints every cell of every row.
     *
     * @param tablename target table
     */
    public static void scanerTable(String tablename) {
        HTable table = null;
        ResultScanner rs = null;
        try {
            table = new HTable(conf, tablename);
            rs = table.getScanner(new Scan());
            for (Result r : rs) {
                KeyValue[] kvs = r.raw();
                for (int i = 0; i < kvs.length; i++) {
                    printKeyValue(kvs[i]);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // The scanner holds a server-side lease; always release it.
            if (rs != null) {
                rs.close();
            }
            closeQuietly(table);
        }
    }

    /**
     * 6. Disables and then drops a table.
     *
     * @param tablename table to drop
     * @throws IOException if disabling or dropping fails
     */
    public static void deleteTable(String tablename) throws IOException {
        try {
            HBaseAdmin admin = new HBaseAdmin(conf);
            admin.disableTable(tablename);
            admin.deleteTable(tablename);
            System.out.println("表删除成功!");
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        }
    }

    /** Prints one cell as: {@code row family:qualifier timestamp value}. */
    private static void printKeyValue(KeyValue kv) {
        System.out.print(Bytes.toString(kv.getRow()) + " ");      // row key
        System.out.print(Bytes.toString(kv.getFamily()) + ":");   // column family
        System.out.print(Bytes.toString(kv.getQualifier()) + " "); // qualifier
        System.out.print(kv.getTimestamp() + " ");                 // timestamp
        System.out.println(Bytes.toString(kv.getValue()));         // cell value
    }

    /** Closes a table, swallowing only the close-time IOException. */
    private static void closeQuietly(HTable table) {
        if (table != null) {
            try {
                table.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) throws IOException {
        String[] cfs = "a,b,c".split(",");
        // createTable("test01", cfs);
        // writeRow("test01", cfs);
        // deleteRow("test01", "rows1");
        // selectRow("test01", "rows2");
        // scanerTable("test01");
        // deleteTable("test01");
    }
}
REF:
【Hbase】Java对hbase的操作
http://smallwildpig.iteye.com/blog/1695203