I. HBase Download
HBase is a distributed, column-oriented, open-source database: HBase API
1. Download HBase
Download the HBase release (go to the download link).
II. HBase Configuration
1. Standalone Deployment
## 1. Create the installation directory
mkdir -p /usr/local/hbase
## 2. Copy the archive to the VM and extract it
tar zxvf hbase-3.0.0-alpha-4-bin.tar.gz -C /usr/local/hbase/
## 3. Add environment variables
echo 'export HBASE_HOME=/usr/local/hbase/hbase-3.0.0-alpha-4' >> /etc/profile
echo 'export PATH=${HBASE_HOME}/bin:${PATH}' >> /etc/profile
source /etc/profile
## 4. Specify the JDK version
echo 'export JAVA_HOME=/usr/local/java/jdk-11.0.19' >> $HBASE_HOME/conf/hbase-env.sh
## 5. Create the HBase data directory
mkdir -p /home/hbase/data
## 6. Edit the configuration
vim $HBASE_HOME/conf/hbase-site.xml
Add the following:
<property>
<name>hbase.rootdir</name>
<value>file:///home/hbase/data</value>
</property>
## 1. Enter the installation directory
cd $HBASE_HOME
## 2. Start the service
./bin/start-hbase.sh
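After startup, the instance can be sanity-checked from the HBase shell. The commands below are a minimal sketch; the table name 'test' and column family 'cf' are placeholders used only for this check.
## Verify the standalone instance (example)
jps                       # an HMaster process should be listed
./bin/hbase shell
status                    # shows the number of live servers
create 'test', 'cf'
put 'test', 'r1', 'cf:a', 'v1'
scan 'test'
disable 'test'
drop 'test'
exit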
## 1. Enter the installation directory
cd $HBASE_HOME
## 2. Stop the service
./bin/stop-hbase.sh
2. Pseudo-Distributed Deployment (based on the standalone configuration)
## 1. Edit hbase-env.sh
echo 'export JAVA_HOME=/usr/local/java/jdk-11.0.19' >> $HBASE_HOME/conf/hbase-env.sh
echo 'export HBASE_MANAGES_ZK=true' >> $HBASE_HOME/conf/hbase-env.sh
## 2. Edit hbase-site.xml
vim $HBASE_HOME/conf/hbase-site.xml
<!-- Store HBase data in HDFS -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://nn/hbase</value>
</property>
<!-- Enable distributed mode -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!-- ZooKeeper quorum address -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>nn</value>
</property>
<!-- HDFS replication factor -->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
## 3. Replace localhost with nn in the regionservers file
echo nn > $HBASE_HOME/conf/regionservers
## 1. Enter the Hadoop installation directory
cd $HADOOP_HOME
## 2. Start the Hadoop services
./sbin/start-all.sh
## 1. Enter the installation directory
cd $HBASE_HOME
## 2. Start the service
./bin/start-hbase.sh
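At this point the expected Java daemons can be listed with jps; the output below is only a sketch, and the Hadoop processes depend on how the cluster was set up.
## Verify the processes (example)
jps
# HMaster
# HRegionServer
# HQuorumPeer          <- ZooKeeper managed by HBase (HBASE_MANAGES_ZK=true)
# NameNode / DataNode  <- from the Hadoop services started earlier
## Confirm the HBase root directory was created in HDFS
hdfs dfs -ls /hbase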
## 1. Enter the installation directory
cd $HBASE_HOME
## 2. Stop the master daemon first (running stop-hbase.sh alone will not shut it down)
./bin/hbase-daemon.sh stop master
## 3. Stop the remaining services
./bin/stop-hbase.sh
3. Cluster Deployment
## 1. Create the ZooKeeper data directory
mkdir -p $HBASE_HOME/zookeeper/data
## 2. Enter the configuration directory
cd $HBASE_HOME/conf
## 3. Edit the environment configuration
vim hbase-env.sh
## Set the JDK and use an external ZooKeeper
# JDK
export JAVA_HOME=/usr/local/java/jdk-11.0.19
# Use an external ZooKeeper ensemble (HBase will not manage ZooKeeper)
export HBASE_MANAGES_ZK=false
## 4. Edit hbase-site.xml
vim hbase-site.xml
## Add the following configuration
<!-- Maximum allowed clock skew (in milliseconds) -->
<property>
<name>hbase.master.maxclockskew</name>
<value>6000</value>
</property>
<!-- HBase root directory on HDFS -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://nn:9000/hbase</value>
</property>
<!-- Enable distributed mode -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!-- ZooKeeper ensemble nodes -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>zk1,zk2,zk3</value>
</property>
<!-- ZooKeeper data directory -->
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/usr/local/hbase/hbase-3.0.0-alpha-4/zookeeper/data</value>
</property>
<!-- Use the filesystem WAL provider (avoids the "Server is not running yet" error) -->
<property>
<name>hbase.wal.provider</name>
<value>filesystem</value>
</property>
## 5. Clear regionservers and add the cluster node hostnames
> regionservers
echo 'nn' >> regionservers
echo 'nd1' >> regionservers
echo 'nd2' >> regionservers
## 6. Create the hbase directory on nd1 and nd2
mkdir -p /usr/local/hbase
## 7. Copy the HBase installation to the other two VMs (nd1 and nd2)
scp -r /usr/local/hbase/hbase-3.0.0-alpha-4 root@nd1:/usr/local/hbase
scp -r /usr/local/hbase/hbase-3.0.0-alpha-4 root@nd2:/usr/local/hbase
## 8. Distribute the environment variable configuration
scp /etc/profile root@nd1:/etc/profile
scp /etc/profile root@nd2:/etc/profile
1. Start the Hadoop Cluster
For the Hadoop cluster setup, see: Hadoop Setup
## 1. Start Hadoop
cd $HADOOP_HOME
./sbin/start-all.sh
## 2. Leave HDFS safe mode
hdfs dfsadmin -safemode leave
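If in doubt, the current safe-mode state can be checked first; this step is optional.
## Optional: check whether safe mode is still on
hdfs dfsadmin -safemode get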
2. Start the ZooKeeper Cluster
For the ZooKeeper cluster setup, see: ZooKeeper Setup
## 1. Start the ZooKeeper cluster
zkServer.sh start && ssh root@zk2 "source /etc/profile && zkServer.sh start && exit" && ssh root@zk3 "source /etc/profile && zkServer.sh start && exit"
## 2. Check the status
zkServer.sh status && ssh root@zk2 "source /etc/profile && zkServer.sh status && exit" && ssh root@zk3 "source /etc/profile && zkServer.sh status && exit"
3. Start the HBase Cluster
## 1. Add ZooKeeper hostname resolution on nn, nd1, and nd2
echo '192.168.1.100 zk1' >> /etc/hosts
echo '192.168.1.101 zk2' >> /etc/hosts
echo '192.168.1.102 zk3' >> /etc/hosts
## 2. Start HBase
cd $HBASE_HOME
./bin/start-hbase.sh
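Before stopping anything, it is worth confirming that each node runs the expected daemons. This is only a sketch using this article's nn / nd1 / nd2 hostnames; the exact process list depends on the Hadoop and ZooKeeper layout.
## Check the daemons on each node (example)
jps                                           # on nn: HMaster and HRegionServer
ssh root@nd1 "source /etc/profile && jps"     # HRegionServer
ssh root@nd2 "source /etc/profile && jps"     # HRegionServer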
## 3. Stop the services
./bin/hbase-daemon.sh stop master
./bin/hbase-daemon.sh stop regionserver
./bin/stop-hbase.sh
View the web UI: http://192.168.1.6:16010/master-status
4. Cluster Start/Stop Script
#!/bin/bash
case $1 in
"start")
## start hadoop
start-all.sh
## start zookeeper (passwordless SSH must be configured first)
zkServer.sh start && ssh root@zk2 "source /etc/profile && zkServer.sh start && exit" && ssh root@zk3 "source /etc/profile && zkServer.sh start && exit"
## start hbase
start-hbase.sh
;;
"stop")
## stop hbase
ssh root@nd1 "source /etc/profile && hbase-daemon.sh stop regionserver && stop-hbase.sh && exit"
ssh root@nd2 "source /etc/profile && hbase-daemon.sh stop regionserver && stop-hbase.sh && exit"
hbase-daemon.sh stop master && hbase-daemon.sh stop regionserver && stop-hbase.sh
## stop zookeeper
zkServer.sh stop && ssh root@zk2 "source /etc/profile && zkServer.sh stop && exit" && ssh root@zk3 "source /etc/profile && zkServer.sh stop && exit"
## stop hadoop
stop-all.sh
;;
*)
echo "please input start|stop"
;;
esac
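To use the script, save it on nn (the file name hbase-cluster.sh below is just an example), make it executable, and pass start or stop; it assumes passwordless SSH and the environment variables configured above.
## Example usage (assumed file name)
chmod +x hbase-cluster.sh
./hbase-cluster.sh start
./hbase-cluster.sh stop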
III. Testing
## 1. Add HBase cluster hostname resolution on Windows by editing the following hosts file
C:\Windows\System32\drivers\etc\hosts
## 2. Add the following entries
192.168.1.6 nn
192.168.1.7 nd1
192.168.1.8 nd2
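A quick check from a Windows command prompt confirms the names resolve; output will vary with your network.
## 3. Verify the hostname resolution (example)
ping nn
ping nd1
ping nd2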
Verify the configuration takes effect
JDK version
Project structure
1. POM Configuration
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.example</groupId>
<artifactId>hbase-demo</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>
<spring.version>2.7.8</spring.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.28</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>2.0.32</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>3.0.0-alpha-4</version>
</dependency>
</dependencies>
</project>
2. YAML Configuration
hbase:
  zookeeper:
    quorum: 192.168.1.100,192.168.1.101,192.168.1.102
    property:
      clientPort: 2181
  master:
    ip: 192.168.1.6
    port: 16000
3. HBase Configuration Class
package org.example.config;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* @author Administrator
* @Description
* @create 2023-07-25 0:26
*/
@Configuration
public class HbaseConfig {
@Value("${hbase.zookeeper.quorum}")
private String zookeeperQuorum;
@Value("${hbase.zookeeper.property.clientPort}")
private String clientPort;
@Value("${hbase.master.ip}")
private String ip;
@Value("${hbase.master.port}")
private int masterPort;
@Bean
public org.apache.hadoop.conf.Configuration hbaseConfiguration(){
org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum",zookeeperQuorum);
conf.set("hbase.zookeeper.property.clientPort",clientPort);
conf.set("hbase.masters", ip + ":" + masterPort);
conf.set("hbase.client.keyvalue.maxsize","20971520");
return HBaseConfiguration.create(conf);
}
}
4. HBase Connection Pool Configuration
package org.example.config;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.util.Enumeration;
import java.util.Vector;
/**
* @author Administrator
* @Description
* @create 2023-07-25 22:39
*/
@Slf4j
@Component
public class HbaseConnectionPool {
/**
* Initial size of the connection pool,
* number of connections created per growth step,
* and maximum pool size
*/
private int nInitConnectionAmount = 3;
private int nIncrConnectionAmount = 3;
private int nMaxConnections = 20;
/**
* Vector holding the pooled connections
*/
private Vector vcConnections = new Vector();
/**
* Injected HBase connection configuration
*/
@Resource
private Configuration hbaseConfiguration;
/**
* Initialize the pool with the initial connections
*/
@PostConstruct
public void init() {
createConnections(nInitConnectionAmount);
}
/**
* Get an available connection, waiting until one becomes free
* @return
*/
public synchronized Connection getConnection() {
Connection conn;
while (null == (conn =getFreeConnection())){
try {
wait(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
// return the acquired connection
return conn;
}
/**
* Release a connection back to the pool
* @param conn
*/
public synchronized void releaseConnection(Connection conn) {
ConnectionWrapper connWrapper;
Enumeration enumerate = this.vcConnections.elements();
while(enumerate.hasMoreElements()) {
connWrapper = (ConnectionWrapper) enumerate.nextElement();
if (conn == connWrapper.getConnection()) {
connWrapper.setBusy(false);
break;
}
}
// wake up any thread waiting in getConnection()
notifyAll();
}
/**
* Get a free connection; create new ones if none is free. Returns null when the maximum pool size is reached, so the caller blocks and retries.
* @return
*/
private Connection getFreeConnection() {
Connection conn;
if (null == (conn = findFreeConnection())) {
// create new connections
createConnections(nIncrConnectionAmount);
// check again for a free connection
if (null == (conn = findFreeConnection())) {
return null;
}
}
return conn;
}
/**
* Find a free connection in the pool
* @return
*/
private Connection findFreeConnection() {
ConnectionWrapper connWrapper;
// iterate over the pooled connection wrappers
Enumeration enumerate = vcConnections.elements();
while (enumerate.hasMoreElements()) {
connWrapper = (ConnectionWrapper) enumerate.nextElement();
// check whether this connection is in use
if (!connWrapper.isBusy()) {
connWrapper.setBusy(true);
return connWrapper.getConnection();
}
}
// no free connection found
return null;
}
/**
* Create new connections
* @param counts
*/
private void createConnections(int counts) {
// create the requested number of connections
try {
for (int i = 0; i < counts; i++) {
if (this.nMaxConnections > 0 && this.vcConnections.size() >= this.nMaxConnections) {
log.warn("Maximum number of connections reached...");
break;
}
// create a new connection and add it to the vector
vcConnections.addElement(new ConnectionWrapper(newConnection()));
}
} catch (Exception e) {
log.error("Failed to create connection...");
}
}
/**
* Create a single new HBase connection
* @return
*/
private Connection newConnection() {
/** HBase connection */
Connection conn = null;
// create a connection from the injected configuration
try {
conn = ConnectionFactory.createConnection(hbaseConfiguration);
} catch (Exception e) {
log.error("HBase connection failed...");
}
// return the newly created connection
return conn;
}
/**
* Wrapper around a pooled connection
*/
@Data
class ConnectionWrapper {
/**
* The underlying connection
*/
private Connection connection;
/**
* Whether this connection is currently in use (defaults to false)
*/
private boolean busy = false;
/**
* Constructor: wraps a Connection in a ConnectionWrapper
*/
public ConnectionWrapper(Connection connection) {
this.connection = connection;
}
}
}
5. Test Class
package org.example.controller;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.ColumnValueFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.util.Bytes;
import org.example.config.HbaseConnectionPool;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.*;
/**
* @author Administrator
*
* Connection acquisition and release could also be handled with AOP
*
* @Description
* @create 2023-07-25 23:06
*/
@Slf4j
@RestController
@RequestMapping("/hbase")
public class HbaseController {
@Resource
private HbaseConnectionPool pool;
/**
* Table name
*/
private String tbl_user = "tbl_user";
/**
* Create the table (skipped if it already exists)
*/
@GetMapping("/create")
public void createTable(){
Connection conn = null;
// get a connection from the pool
try {
conn = pool.getConnection();
Admin admin = conn.getAdmin();
TableName tableName = TableName.valueOf(tbl_user);
if (!admin.tableExists(tableName)){
// specify the table name
TableDescriptorBuilder tdb_user = TableDescriptorBuilder.newBuilder(tableName);
// add the column families (name, age)
ColumnFamilyDescriptor hcd_info = ColumnFamilyDescriptorBuilder.of("name");
ColumnFamilyDescriptor hcd_data = ColumnFamilyDescriptorBuilder.of("age");
tdb_user.setColumnFamily(hcd_info);
tdb_user.setColumnFamily(hcd_data);
// build and create the table
TableDescriptor td = tdb_user.build();
admin.createTable(td);
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
}
/**
* Drop the table (skipped if it does not exist)
*/
@GetMapping("/drop")
public void dropTable(){
Connection conn = null;
try {
conn = pool.getConnection();
Admin admin = conn.getAdmin();
TableName tableName = TableName.valueOf(tbl_user);
if (admin.tableExists(tableName)){
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
}
/**
* Insert test
*/
@GetMapping("/insert")
public void insert(){
log.info("--- insert a single column --- 1");
putData(tbl_user, "row1", "name", "a", "zhangSan");
putData(tbl_user, "row1", "age", "a", "18");
log.info("--- insert multiple columns --- 2");
putData(tbl_user, "row2", "name",
Arrays.asList("a", "b", "c"), Arrays.asList("liSi", "wangWu", "zhaoLiu"));
log.info("--- insert multiple columns --- 3");
putData(tbl_user, "row3", "age",
Arrays.asList("a", "b", "c"), Arrays.asList("18","19","20"));
log.info("--- insert multiple columns --- 4");
putData(tbl_user, "row4", "age",
Arrays.asList("a", "b", "c"), Arrays.asList("30","19","20"));
}
/**
* Insert data (single column)
* @param tableName table name
* @param rowKey row key
* @param columnFamily column family
* @param column column qualifier
* @param value value
* @return true/false
*/
public boolean putData(String tableName, String rowKey, String columnFamily, String column,
String value) {
return putData(tableName, rowKey, columnFamily, Arrays.asList(column),
Arrays.asList(value));
}
/**
* Insert data (multiple columns)
* @param tableName table name
* @param rowKey row key
* @param columnFamily column family
* @param columns column qualifiers
* @param values values
* @return true/false
*/
public boolean putData(String tableName, String rowKey, String columnFamily,
List<String> columns, List<String> values) {
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Put put = new Put(Bytes.toBytes(rowKey));
for (int i=0; i<columns.size(); i++) {
put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columns.get(i)), Bytes.toBytes(values.get(i)));
}
table.put(put);
table.close();
return true;
} catch (IOException e) {
e.printStackTrace();
return false;
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
}
/**
* Query test
*/
@GetMapping("/query")
public void getResultScanner(){
log.info("Full table scan: {}", getData(tbl_user));
log.info("Filter by age [18]: {}", getData(tbl_user, new ColumnValueFilter(Bytes.toBytes("age"), Bytes.toBytes("a"), CompareOperator.EQUAL, Bytes.toBytes("18"))));
log.info("By rowKey [row1]: {}", getData(tbl_user, "row1"));
log.info("By rowKey, column family and column [row2 name a]: {}", getData(tbl_user, "row2", "name", "a"));
}
/**
* Get data (full table scan)
* @param tableName table name
* @return map
*/
public List<Map<String, String>> getData(String tableName) {
List<Map<String, String>> list = new ArrayList<>();
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Scan scan = new Scan();
ResultScanner resultScanner = table.getScanner(scan);
for(Result result : resultScanner) {
HashMap<String, String> map = new HashMap<>(result.listCells().size());
map.put("row", Bytes.toString(result.getRow()));
for (Cell cell : result.listCells()) {
// column family
String family = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
// column qualifier
String qualifier = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
// value
String data = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
map.put(family + ":" + qualifier, data);
}
list.add(map);
}
table.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
return list;
}
/**
* Get data (by filter)
* @param tableName table name
* @param filter filter
* @return map
*/
public List<Map<String, String>> getData(String tableName, Filter filter) {
List<Map<String, String>> list = new ArrayList<>();
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Scan scan = new Scan();
// apply the filter
scan.setFilter(filter);
ResultScanner resultScanner = table.getScanner(scan);
for(Result result : resultScanner) {
HashMap<String, String> map = new HashMap<>(result.listCells().size());
map.put("row", Bytes.toString(result.getRow()));
for (Cell cell : result.listCells()) {
String family = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
String qualifier = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
String data = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
map.put(family + ":" + qualifier, data);
}
list.add(map);
}
table.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
return list;
}
/**
* Get data (by rowKey)
* @param tableName table name
* @param rowKey rowKey
* @return map
*/
public Map<String, String> getData(String tableName, String rowKey) {
HashMap<String, String> map = new HashMap<>();
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Get get = new Get(Bytes.toBytes(rowKey));
Result result = table.get(get);
if (result != null && !result.isEmpty()) {
for (Cell cell : result.listCells()) {
String family = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
String qualifier = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
String data = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
map.put(family + ":" + qualifier, data);
}
}
table.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
return map;
}
/**
* Get data (by rowKey, column family, and column)
* @param tableName table name
* @param rowKey row key
* @param columnFamily column family
* @param columnQualifier column qualifier
* @return the cell value as a string
*/
public String getData(String tableName, String rowKey, String columnFamily,
String columnQualifier) {
String data = "";
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Get get = new Get(Bytes.toBytes(rowKey));
get.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columnQualifier));
Result result = table.get(get);
if (result != null && !result.isEmpty()) {
Cell cell = result.listCells().get(0);
data = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
}
table.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
return data;
}
/**
* Delete test
*/
@GetMapping("/delete")
public void delete(){
log.info("--- delete by rowKey --- row1");
deleteData(tbl_user,"row1");
log.info("--- delete by rowKey and column family --- row2 age");
deleteData(tbl_user,"row2","age");
}
/**
* Delete data (by rowKey)
* @param tableName table name
* @param rowKey rowKey
*/
public void deleteData(String tableName, String rowKey) {
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Delete delete = new Delete(Bytes.toBytes(rowKey));
table.delete(delete);
table.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
}
/**
* Delete data (by rowKey and column family)
* @param tableName table name
* @param rowKey row key
* @param columnFamily column family
*/
public void deleteData(String tableName, String rowKey, String columnFamily) {
Connection conn = null;
try {
conn = pool.getConnection();
Table table = conn.getTable(TableName.valueOf(tableName));
Delete delete = new Delete(Bytes.toBytes(rowKey));
delete.addFamily(columnFamily.getBytes());
table.delete(delete);
table.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (null != conn){
pool.releaseConnection(conn);
}
}
}
}
6. Startup Class
package org.example;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
* @author Administrator
*/
@SpringBootApplication
public class HbaseDemo {
public static void main(String[] args) {
SpringApplication.run(HbaseDemo.class,args);
}
}
7. Test
Create table: http://127.0.0.1:8080/hbase/create
Insert: http://127.0.0.1:8080/hbase/insert
Query: http://127.0.0.1:8080/hbase/query
Delete: http://127.0.0.1:8080/hbase/delete
Drop table: http://127.0.0.1:8080/hbase/drop
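The same endpoints can be called from the command line, and the written rows can be cross-checked from the HBase shell on nn. This is a sketch that assumes the application is running locally on port 8080.
## call an endpoint with curl (example)
curl http://127.0.0.1:8080/hbase/insert
## verify the rows from the HBase shell on nn
hbase shell
scan 'tbl_user'
exit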
View the UI