1. The ZooKeeper Client
-
(1) Connect to the ZooKeeper server over SSH, change into the bin directory under the ZooKeeper installation directory, and run
./zkCli.sh -timeout 5000 -r -server ip:port
to open a client connection. -r is an optional flag that enables read-only mode: when a server loses contact with more than half of the servers in the ensemble, it stops serving client requests; if you still want that server to serve read-only requests in this situation, connect with -r. ip is the IP address of the server to connect to and port is its port number; the default port is 2181.
[zk: 192.168.153.129:2181(CONNECTED) 0] help
ZooKeeper -server host:port cmd args
# Show a node's status
stat path [watch]
# Set a node's data
set path data [version]
# List a node's children
ls path [watch]
# Delete a node's quota; -n for the child count quota, -b for the data length quota
delquota [-n|-b] path
# List a node's children and show the node's status
ls2 path [watch]
# Set a node's ACL
setAcl path acl
# Set a quota on a node
setquota -n|-b val path
# Show the command history
history
# Re-run a command from the history; cmdno is its ID in the history list
redo cmdno
# Toggle printing of watch events
printwatches on|off
# Delete a node that has no children
delete path [version]
# Synchronize the client's view of a path with the leader
sync path
# Show a node's quota
listquota path
# Delete a node recursively
rmr path
# Get a node's data
get path [watch]
# Create a node; -s creates a sequential node with an auto-incremented suffix, -e creates an ephemeral node
create [-s] [-e] path data acl
# Add authentication information to the current session
addauth scheme auth
# Quit the client
quit
# Show a node's ACL
getAcl path
# Close the current connection
close
# Connect to another server
connect host:port
root@liumeng-1:/opt/zookeeper/bin# ./zkCli.sh -timeout 5000 -server 192.168.153.129:2181
WatchedEvent state:SyncConnected type:None path:null
In ZooKeeper, every write to a data node is a transaction, and every transaction is assigned a transaction ID (zxid).
- Client commands in detail
#(1) The ls and stat commands
- ls path : list a node's children
- stat path : show a node's status
# List the children of a node
[zk: 192.168.153.129:2181(CONNECTED) 0] ls /
[zookeeper]
# Show the node's status
[zk: 192.168.153.129:2181(CONNECTED) 1] stat /
# zxid of the transaction that created this node
cZxid = 0x0
# Time the node was created
ctime = Thu Jan 01 08:00:00 CST 1970
# zxid of the transaction that last modified this node
mZxid = 0x0
# Time the node was last modified
mtime = Thu Jan 01 08:00:00 CST 1970
# zxid of the transaction that last changed this node's child list; adding a child to or removing a child from this node changes the child list, but changing a child's data does not count
pZxid = 0x0
# Version number of the child list
cversion = -1
# Version number of the node's data
dataVersion = 0
# Version number of the node's ACL
aclVersion = 0
# For ephemeral nodes, the ID of the session that created the node; for a persistent node this is always 0x0
ephemeralOwner = 0x0
# Length of the data stored at this node
dataLength = 0
# Number of children of this node
numChildren = 1
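The optional watch argument of ls and stat registers a one-shot watch on the node. A minimal sketch of the flow (the node name /node_w is illustrative, not part of the transcript above):
# register a child watch while listing the children of /
ls / watch
# now change the child list, from this or another session
create /node_w 123
# the client prints a WatchedEvent of type NodeChildrenChanged for /
# watches are one-shot: re-run ls / watch to be notified again
delete /node_w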
#(2) The get command
- get path : get a node's data
[zk: 192.168.153.129:2181(CONNECTED) 5] get /zookeeper/node_1
# The node's data
345
# The node's status, identical to the output of stat
cZxid = 0x1900000006
ctime = Wed Apr 19 15:24:59 CST 2017
mZxid = 0x1900000006
mtime = Wed Apr 19 15:24:59 CST 2017
pZxid = 0x1900000006
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 3
numChildren = 0
[zk: 192.168.153.129:2181(CONNECTED) 6]
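get also accepts the optional watch argument, which registers a one-shot watch on the node's data; a minimal sketch using the node read above:
get /zookeeper/node_1 watch
set /zookeeper/node_1 678
# the set triggers a WatchedEvent of type NodeDataChanged for /zookeeper/node_1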
#(3) The ls2 command
- ls2 path : list a node's children and show the node's status
[zk: 192.168.153.129:2181(CONNECTED) 1] ls2 /zookeeper
[node_1, quota]
cZxid = 0x0
ctime = Thu Jan 01 08:00:00 CST 1970
mZxid = 0x0
mtime = Thu Jan 01 08:00:00 CST 1970
pZxid = 0x1900000006
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 2
#(4) The create command
- create [-s] [-e] path data acl : create a node; -s makes it a sequential node, -e makes it ephemeral, path is the node path, data is the node's data, and acl is the node's ACL (an ACL sketch follows at the end of this section)
# Create a regular (persistent) node
[zk: 192.168.153.129:2181(CONNECTED) 0] create /node_1 123
Created /node_1
[zk: 192.168.153.129:2181(CONNECTED) 2] get /node_1
123
cZxid = 0x190000000b
ctime = Wed Apr 19 15:36:40 CST 2017
mZxid = 0x190000000b
mtime = Wed Apr 19 15:36:40 CST 2017
pZxid = 0x190000000b
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 3
numChildren = 0
# Create an ephemeral node; once the client session ends, the node is deleted, as reconnecting below shows
[zk: 192.168.153.129:2181(CONNECTED) 1] create -e /node_1/node_1_1 234
Created /node_1/node_1_1
[zk: 192.168.153.129:2181(CONNECTED) 2] ls /node_1
[node_1_1]
# Quit the client
[zk: 192.168.153.129:2181(CONNECTED) 3] quit
Quitting...
# Reconnect
root@liumeng-1:/opt/zookeeper/bin# ./zkCli.sh -timeout 5000 -server 192.168.153.129:2181
Connecting to 192.168.153.129:2181
# Query again: the node no longer has children
[zk: 192.168.153.129:2181(CONNECTED) 0] ls /node_1
[]
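One more property worth noting: ephemeral nodes cannot have children, so any attempt to create a node under one is rejected. A hypothetical sketch (/node_e is not part of the transcript):
create -e /node_e 1
create /node_e/child 1
# fails: ephemeral nodes cannot have children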
# Create sequential nodes
[zk: 192.168.153.129:2181(CONNECTED) 1] create -s /node_1/node_1_1 234
# The node name actually created is returned
Created /node_1/node_1_10000000001
# Creating the same name under the same path keeps working, because sequential nodes get an auto-incremented suffix
[zk: 192.168.153.129:2181(CONNECTED) 2] create -s /node_1/node_1_1 234
Created /node_1/node_1_10000000002
[zk: 192.168.153.129:2181(CONNECTED) 3] create -s /node_1/node_1_1 234
Created /node_1/node_1_10000000003
[zk: 192.168.153.129:2181(CONNECTED) 4] ls /node_1
[node_1_10000000001, node_1_10000000003, node_1_10000000002]
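The acl argument of create, together with setAcl and getAcl, controls who may do what on a node; the permission letters are c (create), d (delete), r (read), w (write), and a (admin). A minimal sketch using the built-in world scheme (the node path is illustrative):
# make /node_1 read-only for everyone
setAcl /node_1 world:anyone:r
getAcl /node_1
# shows 'world,'anyone with permission r
set /node_1 999
# the write is now rejected with an authentication error
setAcl /node_1 world:anyone:cdrwa
# full permissions restored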
#(5) The set command
- set path data [version] : update a node's data; version, the node's current data version, is optional; if given, the update succeeds only when it matches the node's current dataVersion and fails otherwise (see the sketch at the end of this section)
# Update the node's data
[zk: 192.168.153.129:2181(CONNECTED) 3] set /node_1/node_1_10000000001 789
cZxid = 0x1900000011
ctime = Wed Apr 19 15:39:27 CST 2017
mZxid = 0x1900000016
mtime = Wed Apr 19 15:47:37 CST 2017
pZxid = 0x1900000011
cversion = 0
# The data version has been incremented by 1
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 3
numChildren = 0
# Read the node back
[zk: 192.168.153.129:2181(CONNECTED) 4] get /node_1/node_1_10000000001
789
cZxid = 0x1900000011
ctime = Wed Apr 19 15:39:27 CST 2017
mZxid = 0x1900000016
mtime = Wed Apr 19 15:47:37 CST 2017
pZxid = 0x1900000011
cversion = 0
# The data version has been incremented by 1
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 3
numChildren = 0
# Update with an explicit data version
# The version matches, so the update succeeds and dataVersion is incremented by 1 (an intermediate update not shown in the transcript had already raised dataVersion to 2)
[zk: 192.168.153.129:2181(CONNECTED) 5] set /node_1/node_1_10000000001 987 2
cZxid = 0x1900000011
ctime = Wed Apr 19 15:39:27 CST 2017
mZxid = 0x190000001c
mtime = Wed Apr 19 15:51:52 CST 2017
pZxid = 0x1900000011
cversion = 0
dataVersion = 3
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 3
numChildren = 0
# The version does not match, so the update fails
[zk: 192.168.153.129:2181(CONNECTED) 6] set /node_1/node_1_10000000001 987 2
version No is not valid : /node_1/node_1_10000000001
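The version argument gives you a simple check-and-set: read the current dataVersion first, then write with it. A sketch (the values assume the state left by the transcript above):
stat /node_1/node_1_10000000001
# note dataVersion in the output, here 3
set /node_1/node_1_10000000001 100 3
# succeeds only if nobody else updated the node in between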
#(6) The delete and rmr commands
- delete path [version] : delete a node; only a node without children can be deleted, otherwise an error is reported; version behaves the same as in set (a sketch follows at the end of this section)
- rmr path : delete a node recursively, including all of its children
# delete a node that has no children
[zk: 192.168.153.129:2181(CONNECTED) 0] delete /node_1/node_1_10000000001
[zk: 192.168.153.129:2181(CONNECTED) 2] ls /node_1
[node_1_10000000003, node_1_10000000002]
# delete fails on a node that has children
[zk: 192.168.153.129:2181(CONNECTED) 3] delete /node_1
Node not empty: /node_1
# rmr removes a node together with its children
[zk: 192.168.153.129:2181(CONNECTED) 4] rmr /node_1
[zk: 192.168.153.129:2181(CONNECTED) 5] ls /
[zookeeper]
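delete also takes the optional version argument, with the same semantics as in set; a hypothetical sketch (/node_x is illustrative):
stat /node_x
# suppose dataVersion is 0
delete /node_x 0
# succeeds; with any other version number the delete would fail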
#(7) The setquota command
- setquota -n|-b val path : set a quota on a node; -n limits the number of child nodes, -b limits the length of the node's data, val is the limit, and path is the node path
# Set a quota of 2 child nodes
[zk: 192.168.153.129:2181(CONNECTED) 3] setquota -n 2 /node_1
Comment: the parts are option -n val 2 path /node_1
# Create child nodes
[zk: 192.168.153.129:2181(CONNECTED) 5] create -s /node_1/node_1_1 123
Created /node_1/node_1_10000000001
[zk: 192.168.153.129:2181(CONNECTED) 6] create -s /node_1/node_1_1 123
Created /node_1/node_1_10000000002
[zk: 192.168.153.129:2181(CONNECTED) 7] create -s /node_1/node_1_1 123
Created /node_1/node_1_10000000003
[zk: 192.168.153.129:2181(CONNECTED) 8] ls /node_1
[node_1_10000000001, node_1_10000000003, node_1_10000000002]
Notice that the quota was exceeded without any error being raised: in ZooKeeper, when the number of children exceeds the quota, no exception is thrown; a warning is merely written to the server log, which lives under the bin directory of the ZooKeeper installation (a sketch for finding it follows the log lines below).
# In the line "Quota exceeded: path count limit", path is the node whose quota was exceeded, count is the current number of nodes counting the node itself and its children, and limit is the configured quota
2017-04-19 16:04:15,460 [myid:3] - WARN [CommitProcessor:3:DataTree@301] - Quota exceeded: /node_1 count=3 limit=2
2017-04-19 16:04:18,128 [myid:3] - WARN [CommitProcessor:3:DataTree@301] - Quota exceeded: /node_1 count=4 limit=2
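To spot these warnings, search the server log; the file name and location below are assumptions based on a default zkServer.sh start from the bin directory:
# zookeeper.out is the default log file; the path is an assumption for this setup
grep "Quota exceeded" /opt/zookeeper/bin/zookeeper.out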
#(8) The listquota command
- listquota path : show a node's quota
[zk: 192.168.153.129:2181(CONNECTED) 0] listquota /node_1
absolute path is /zookeeper/quota/node_1/zookeeper_limits
# The node's quota: the child-count limit is 2, and the data length is unlimited (-1 means no limit)
Output quota for /node_1 count=2,bytes=-1
# The node's current state: count is the number of nodes counting the node itself and its children, bytes is the combined data length of the node and all of its children
Output stat for /node_1 count=4,bytes=12
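As the absolute path in the output hints, quotas are stored in the tree itself under /zookeeper/quota, so you can read them like any other node (the exact output format is not shown here):
get /zookeeper/quota/node_1/zookeeper_limits
# the configured quota, e.g. count=2,bytes=-1
get /zookeeper/quota/node_1/zookeeper_stats
# the current usage tracked by the server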
#(9) The delquota command
- delquota [-n|-b] path : delete a node's quota
[zk: 192.168.153.129:2181(CONNECTED) 0] listquota /node_1
absolute path is /zookeeper/quota/node_1/zookeeper_limits
Output quota for /node_1 count=2,bytes=-1
Output stat for /node_1 count=5,bytes=15
[zk: 192.168.153.129:2181(CONNECTED) 1] delquota -n /node_1
[zk: 192.168.153.129:2181(CONNECTED) 2] listquota /node_1
absolute path is /zookeeper/quota/node_1/zookeeper_limits
Output quota for /node_1 count=-1,bytes=-1
Output stat for /node_1 count=5,bytes=15