1.redis 安装
/data/soft 下载目录
/opt/redis_6379/{conf,logs,pid} 安装目录,日志目录,pid目录,配置目录
/data/redis_6379/ 数据目录
mkdir -p /data/soft
mkdir -p /opt/redis_6379/{conf,logs,pid}
mkdir -p /data/redis_6379/
yum install wget gcc-c++ -y
cd /data/soft
wget http://download.redis.io/releases/redis-5.0.7.tar.gz
tar xf redis-5.0.7.tar.gz -C /opt/
ln -s /opt/redis-5.0.7 /opt/redis
cd /opt/redis
make
make install #此命令将编译好的redis可执行文件复制到 /usr/local/bin,使其在PATH中可直接调用
cat > /opt/redis_6379/conf/redis_6379.conf <<EOF
### 以守护进程模式启动
daemonize yes
### 绑定的主机地址
bind 10.0.0.171
### 监听端口
port 6379
### pid文件和log文件的保存地址
pidfile /opt/redis_6379/pid/redis_6379.pid
logfile /opt/redis_6379/logs/redis_6379.log
### 设置数据库的数量,默认数据库为0
databases 16
### 指定本地持久化文件的文件名,默认是dump.rdb
dbfilename redis_6379.rdb
### 本地数据库的目录
dir /data/redis_6379
EOF
redis-server /opt/redis_6379/conf/redis_6379.conf
[root@db01 redis]# redis-cli -h 10.0.0.171 登录
/opt/redis/utils/install_server.sh 默认是安装文件 安装后会生成系统指定的目录
2.redis 命令基础使用
set k1 v1 设置key赋值
get k1 查看key
keys * 查看里面key
TYPE k1 查看类型
DBSIZE 查看多少key
字符串的命令
INCR k2 自增 1
INCRBY k2 100 自增100
INCRBY k1 -1 自减1
INCRBY k1 -100 自减100
MSET k3 v3 k4 v4 k5 v5 批量增加key 但是会覆盖之前的key值
MGET k3 k5
EXISTS K7
(integer) 0 不存在
(integer) 1 存在
db01:6379> DEL K3 删除key
(integer) 0
db01:6379> EXPIRE k1 100 键过期时间
(integer) 1
TTL k1 查看过期时间
-1 代表永不过期
-2 没有key
-N N秒后过期
取消过期
PERSIST k1 取消时间
set k1 v1 重新赋值
列表命令
RPUSH list1 1 2 3 4 5 右边插入(左边插入命令为 LPUSH)
LPUSH list1 a b 左边插入
LLEN list1 查看列表多长
LRANGE list1 0 -1 查看列表list1 所有
RPOP list1 右边移走1个
LPOP list1 左边移走1个 删除
DEL list1 删除list1列表
db01:6379> RPUSH list1 1 2 3 4 5
(integer) 5
db01:6379> LPUSH list1 a b
(integer) 7
db01:6379> LLEN list1
(integer) 7
db01:6379> LRANGE list1 0 -1
1) "b"
2) "a"
3) "1"
4) "2"
5) "3"
6) "4"
7) "5"
db01:6379> RPOP list1
"5"
db01:6379> LPOP list1
"b"
db01:6379> LRANGE list1 0 -1
1) "a"
2) "1"
3) "2"
4) "3"
5) "4"
db01:6379> DEL list1
(integer) 1
hash操作 缓存 cook
HMSET user:1000 username test age 33 work it 赋值
HMGET user:1000 username age work 查询数据
HGETALL user:1000 查询所有
db01:6379> HMSET user:1000 username test age 33 work it
OK 相当于mysql select * from user where id=1000;
db01:6379> HMGET user:1000 age
1) "33"
db01:6379> hgetall user:1000
1) "username"
2) "test"
3) "age"
4) "33"
5) "work"
6) "it"
集合类型
SADD set1 1 2 3 4 5 6 3 5 赋值 重复只算一次
SADD set2 1 0 2 4
SMEMBERS set2 查看赋值
SDIFF set1 set2 以set1为基础 查询跟set2不重复
SDIFF set2 set1 以set2为基础 查询跟set1不重复
SINTER set1 set2 求set1 set2 共有的数据 交集
SUNION set1 set2 求set1 set2 合并去重后的所有数据 并集
db01:6379> SADD set1 1 2 3 4 5 6 3 5
(integer) 6
db01:6379> SADD set2 1 0 2 4
(integer) 4
db01:6379> keys *
1) "k4"
2) "k1"
3) "k3"
4) "user:1000"
5) "set1"
6) "set2"
db01:6379> SMEMBERS set1
1) "1"
2) "2"
3) "3"
4) "4"
5) "5"
6) "6"
db01:6379> SDIFF set1 set2
1) "3"
2) "5"
3) "6"
db01:6379> SDIFF set2 set1
1) "0"
db01:6379> SINTER set1 set2
1) "1"
2) "2"
3) "4"
db01:6379> SUNION set1 set2
1) "0"
2) "1"
3) "2"
4) "3"
5) "4"
6) "5"
7) "6"
3.redis持久化存储
https://redis.io/topics/persistence
RDB 做快照
AOF 相当于mysql binlog日志
RDB:类似于快照的形式,当前内存里的状态持久化到硬盘里
优点:压缩格式/恢复速度快
缺点:不是实时的,可能会丢失数据,操作比较重
AOF:类似于mysql的binlog,可以设置为每秒/每次操作以追加的形式持久化
优点:安全,最多损失1秒的数据,可读
缺点:文件比较大,恢复速度慢
当aof和rdb同时存在时,重启redis会优先读取aof的内容
rdb
bgsave 将内存中数据保存磁盘中
添加配置 自动保存
save 900 1 900秒(15分钟)内有1个key
save 300 10 300秒(5分钟)内有10个key
save 60 10000 60秒(1分钟)内有10000个key
for((i=1;i<=1000;i++));do redis-cli -h db01 set k_${i} v_${i} ; done
配置后 shutdown 也会自动保存文件 相当于 bgsave + shutdown
pkill kill -15 shutdown 会自动保存
kill -9 强制杀到进程 不会自动保存
AOF 日志 将之前的日志会汇总
执行的命令 aof记录 redis的数据
set k1 v1 set k1 k1
set k2 v2 set k2 k1 k2
set k3 v3 set k3 k1 k2 k3
del k1 del k1 k2 k3
del k2 del k2 k3
实际有意义的只有一条记录(aof重写后只保留):
set k3 v3
配置文件
##是否开启aof
appendonly yes
appendfilename "redis.aof"
##每秒写1次
appendfsync everysec
#每个命令写1次
appendfsync always
[root@redis01 ~]# vim /opt/redis_6379/conf/redis_6379.conf
### 以守护进程模式启动
daemonize yes
### 绑定的主机地址
bind 10.0.0.171
### 监听端口
port 6379
### pid文件和log文件的保存地址
pidfile /opt/redis_6379/pid/redis_6379.pid
logfile /opt/redis_6379/logs/redis_6379.log
### 设置数据库的数量,默认数据库为0
databases 16
### 指定本地持久化文件的文件名,默认是dump.rdb
save 900 1
save 300 10
save 60 10000
dbfilename redis_6379.rdb
### 本地数据库的目录
dir /data/redis_6379
##定本地持久化文件的文件名aof
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
备份建议
1.开启混合模式
2.开启aof
3.不开启rdb
4.rdb采用定时任务的方式定时备份
如果设置了过期时间,恢复数据会如何处理
1.aof文件会记录下过期的时间
2.恢复的时候会去对比记录的过期时间和当前时间,如果超过了,就删除key
3.key的过期时间不受备份恢复影响
4.redis 安全认证
禁止 protected-mode
protected-mode yes/no 是否只允许本地访问
指定网卡
bind 127.0.0.1 192.168.122.102
指定密码
requirepass 密码
验证
redis-cli -a 密码
5. redis主从复制
db01:
cd /opt/
tar czf redis.tar.gz *
scp redis.tar.gz root@10.0.0.172:/opt
db02:
cd /opt/
tar xf redis.tar.gz
make install 创建环境变量
vim /opt/redis_6379/conf/redis_6379.conf
bind 10.0.0.172 修改ip地址
sed -i.bak '/10.0.0.171/c 10.0.0.172' /opt/redis_6379/conf/redis_6379.conf
redis-server /opt/redis_6379/conf/redis_6379.conf 启动服务
slaveof db01 6379 设置主从 写入配置文件中
redis-cli -h db02 slaveof db01 6379 临时 重启丢失主从
[root@db02 redis]# vim /opt/redis_6379/conf/redis_6379.conf 永久修改
### 以守护进程模式启动
daemonize yes
### 绑定的主机地址
bind 10.0.0.172
### 监听端口
port 6379
### pid文件和log文件的保存地址
pidfile /opt/redis_6379/pid/redis_6379.pid
logfile /opt/redis_6379/logs/redis_6379.log
### 设置数据库的数量,默认数据库为0
databases 16
### 指定本地持久化文件的文件名,默认是dump.rdb
save 900 1
save 300 10
save 60 10000
dbfilename redis_6379.rdb
### 本地数据库的目录
dir /data/redis_6379
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
##主从配置
slaveof db01 6379
主从同步步骤:
从库发起同步请求
主库收到请求 后bgsave 保存当前主机的内存中数据
主库同步数据到从库
从库收到数据后 清空自己的数据
从库加载主库发过来的文件 加载到自己的内存中
SLAVEOF no one 取消主从复制
redis-cli -h db01 info Replication 查看主从
db01:6379> INFO Replication
# Replication
role:master
connected_slaves:1
slave0:ip=10.0.0.172,port=6379,state=online,offset=150,lag=1
master_replid:0799f0040323e39c7d70f129245e51bda37002d2
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:150
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:150
1.从节点只读不可写
2.从节点不会自动故障转移,它会一直同步主节点
10.0.0.172:6379> set k1 v1
(error) READONLY You can't write against a read only slave.
3.主从复制故障转移需要人工介入
- 修改代码指向REDIS的IP地址
- 从节点需要执行SLAVEOF no one
4.从节点会清空自己原有的数据,如果同步的对象写错了,就会导致数据丢失
5.从库和主库后续的同步依靠的是redis的SYNC协议,而不是RDB文件,RDB文件只是第一次建立同步时使用。
6.从库也可以正常的持久化文件
图片.png
6.redis 哨兵模式
6.1 设置主从模式
db01 db02 db03 设置主从模式
mkdir -p /data/soft
mkdir -p /opt/redis_6379/{conf,logs,pid}
mkdir -p /data/redis_6379/
yum install wget gcc-c++ -y
cd /data/soft
wget http://download.redis.io/releases/redis-5.0.7.tar.gz
tar xf redis-5.0.7.tar.gz -C /opt/
ln -s /opt/redis-5.0.7 /opt/redis
cd /opt/redis
make
make install #此命令将编译好的redis可执行文件复制到 /usr/local/bin,使其在PATH中可直接调用
[root@db01 ~]# cat >/opt/redis_6379/conf/redis_6379.conf <<EOF
### 以守护进程模式启动
daemonize yes
### 绑定的主机地址
bind $(ifconfig eth0|awk 'NR==2{print $2}')
### 监听端口
port 6379
### pid文件和log文件的保存地址
pidfile /opt/redis_6379/pid/redis_6379.pid
logfile /opt/redis_6379/logs/redis_6379.log
### 设置数据库的数量,默认数据库为0
databases 16
### 指定本地持久化文件的文件名,默认是dump.rdb
save 900 1
save 300 10
save 60 10000
dbfilename redis_6379.rdb
### 本地数据库的目录
dir /data/redis_6379
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
EOF
redis-server /opt/redis_6379/conf/redis_6379.conf 启动服务
db02 db03
rsync -avz root@10.0.0.171:/opt/* /opt/
cd /opt/redis/
make install
sed -i "/bind/c bind $(ifconfig eth0|awk 'NR==2{print $2}')" /opt/redis_6379/conf/redis_6379.conf
mkdir -p /data/redis_6379/
redis-server /opt/redis_6379/conf/redis_6379.conf
redis-cli -h 10.0.0.172 slaveof 10.0.0.171 6379 配置主从
redis-cli -h 10.0.0.173 slaveof 10.0.0.171 6379 配置主从
[root@db03 redis]# redis-cli -h 10.0.0.171 info replication
# Replication
role:master
connected_slaves:2
slave0:ip=10.0.0.172,port=6379,state=online,offset=78952,lag=1
slave1:ip=10.0.0.173,port=6379,state=online,offset=78952,lag=1
master_replid:d0d715115cc134a166a436671e8d7713e6786da3
master_replid2:0799f0040323e39c7d70f129245e51bda37002d2
master_repl_offset:78952
second_repl_offset:78859
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:78952
6.2 设置哨兵模式
db01 db02 db03 设置哨兵模式
mkdir -p /data/redis_26379
mkdir -p /opt/redis_26379/{conf,pid,logs}
cat >/opt/redis_26379/conf/redis_26379.conf << EOF
bind $(ifconfig eth0|awk 'NR==2{print $2}')
port 26379
daemonize yes
logfile /opt/redis_26379/logs/redis_26379.log
dir /data/redis_26379
#mymaster 主节点别名 主节点 ip 和端口, 判断主节点失败, 两个 sentinel 节点同意
sentinel monitor myredis 10.0.0.171 6379 2
#选项指定了 Sentinel 认为服务器已经断线所需的毫秒数
sentinel down-after-milliseconds myredis 3000
#向新的主节点发起复制操作的从节点个数, 1 轮询发起复制 一个一个复制
sentinel parallel-syncs myredis 1
#故障转移超时时间
sentinel failover-timeout myredis 18000
EOF
redis-sentinel /opt/redis_26379/conf/redis_26379.conf
[root@db01 ~]# redis-cli -h db01 -p 26379
db01:26379> info sentinel 集群信息
# Sentinel
sentinel_masters:1
sentinel_tilt:0
sentinel_running_scripts:0
sentinel_scripts_queue_length:0
sentinel_simulate_failure_flags:0
master0:name=myredis,status=ok,address=10.0.0.171:6379,slaves=2,sentinels=3
db01:26379> Sentinel get-master-addr-by-name myredis 查询主节点
1) "10.0.0.171"
2) "6379"
db01:26379> sentinel master myredis
1) "name"
2) "myredis"
3) "ip"
4) "10.0.0.171"
5) "port"
6) "6379"
7) "runid"
8) "8f5ec43205c37528cf8fe7f492c253f55ffb7635"
9) "flags"
10) "master"
11) "link-pending-commands"
12) "0"
13) "link-refcount"
14) "1"
15) "last-ping-sent"
16) "0"
17) "last-ok-ping-reply"
18) "55"
19) "last-ping-reply"
20) "55"
21) "down-after-milliseconds"
22) "3000"
23) "info-refresh"
24) "4220"
25) "role-reported"
26) "master"
27) "role-reported-time"
28) "6077714"
29) "config-epoch"
30) "0"
31) "num-slaves"
32) "2"
33) "num-other-sentinels"
34) "2"
35) "quorum"
36) "2"
37) "failover-timeout"
38) "18000"
39) "parallel-syncs"
40) "1"
6.3 控制优先级 故障切换
通过修改优先级来控制 故障切换到那台redis上
查询命令:CONFIG GET slave-priority 默认100
设置命令:CONFIG SET slave-priority 0
主动切换:sentinel failover myredis 在哨兵模式下切换
将需要调整的节点上
CONFIG SET slave-priority 0 将从100调整为0
[root@db02 ~]# redis-cli -h 10.0.0.172 -p 6379
10.0.0.172:6379> CONFIG SET slave-priority 0
OK
10.0.0.172:6379> CONFIG GET slave-priority
1) "slave-priority"
2) "0"
[root@db02 opt]# redis-cli -h db02 -p 26379 切换完成
db02:26379> sentinel failover myredis
OK
db02:26379> Sentinel get-master-addr-by-name myredis
1) "10.0.0.173"
2) "6379"
最后将之前改动修改回来
CONFIG SET slave-priority 100
7. 手动部署redis 集群 3.0 版本后出现
7.1 配置各节点启动redis
mkdir -p /opt/redis_{6380,6381}/{conf,logs,pid}
mkdir -p /data/redis_{6380,6381}
cat >/opt/redis_6381/conf/redis_6381.conf <<EOF
##以守护进程
daemonize yes
##绑定网卡
bind $(ifconfig eth0|awk 'NR==2{print $2}')
##端口
port 6381
##pid log
pidfile "/opt/redis_6381/pid/redis_6381.pid"
logfile "/opt/redis_6381/logs/redis_6381.log"
##数据库数量 默认0
databases 16
##指定本地持久化文件
dbfilename "redis_6381.rdb"
##指定本地数据文件目录
dir "/data/redis_6381"
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
##打开集群
cluster-enabled yes
##集群文件
cluster-config-file nodes_6381.conf
##集群15s切换
cluster-node-timeout 15000
EOF
cat >/opt/redis_6380/conf/redis_6380.conf <<EOF
##以守护进程
daemonize yes
##绑定网卡
bind $(ifconfig eth0|awk 'NR==2{print $2}')
##端口
port 6380
##pid log
pidfile "/opt/redis_6380/pid/redis_6380.pid"
logfile "/opt/redis_6380/logs/redis_6380.log"
##数据库数量 默认0
databases 16
##指定本地持久化文件
dbfilename "redis_6380.rdb"
##指定本地数据文件目录
dir "/data/redis_6380"
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
cluster-enabled yes
cluster-config-file nodes_6380.conf
cluster-node-timeout 15000
EOF
redis-server /opt/redis_6380/conf/redis_6380.conf
redis-server /opt/redis_6381/conf/redis_6381.conf
注意 服务启动的时候 会同时启动1000以上的端口 通讯 生产环境下需要防火墙放行
7.2 将节点加入集群
redis-cli -h db01 -p 6380 CLUSTER MEET 10.0.0.172 6380
redis-cli -h db01 -p 6380 CLUSTER MEET 10.0.0.173 6380
redis-cli -h db01 -p 6380 CLUSTER MEET 10.0.0.173 6381
redis-cli -h db01 -p 6380 CLUSTER MEET 10.0.0.172 6381
redis-cli -h db01 -p 6380 CLUSTER MEET 10.0.0.171 6381
redis-cli -h db01 -p 6380 CLUSTER NODES
[root@db01 ~]# redis-cli -h db01 -p 6380 CLUSTER NODES 查看集群
d0cd29af7ebf22483a9b2faadbeb09b12f2a09d2 10.0.0.172:6380@16380 master - 0 1615208329000 1 connected
24ec035c46d24eac5da3be5612254e61617be34f 10.0.0.173:6380@16380 master - 0 1615208329033 2 connected
6bff3eff842c1fc2283768685334ecd9e43a101d 10.0.0.172:6381@16381 master - 0 1615208327024 4 connected
5b7c99c80b8f471ecaeed654fe389e5e2f7e302f 10.0.0.173:6381@16381 master - 0 1615208330038 3 connected
a15f99462b5ede942a795ef98c0630e9b60e0f27 10.0.0.171:6381@16381 master - 0 1615208328028 5 connected
dbc61856ac2cfb6a865cd95548d3c66a38f9f947 10.0.0.171:6380@16380 myself,master - 0 1615208328000 0 connected
7.3 集群分配槽位 共16384个槽位(编号0-16383) 按节点数平均分配
db01:6380 5461 0-5460
db02:6380 5461 5461-10921
db03:6380 5462 10922-16383
redis-cli -h db01 -p 6380 CLUSTER ADDSLOTS {0..5460}
redis-cli -h db02 -p 6380 CLUSTER ADDSLOTS {5461..10921}
redis-cli -h db03 -p 6380 CLUSTER ADDSLOTS {10922..16383}
redis-cli -h db01 -p 6380 CLUSTER NODES
redis-cli -h db01 -p 6380 CLUSTER INFO
[root@db01 ~]# redis-cli -h db01 -p 6380 CLUSTER NODES
d0cd29af7ebf22483a9b2faadbeb09b12f2a09d2 10.0.0.172:6380@16380 master - 0 1615208496949 1 connected 5461-10921
24ec035c46d24eac5da3be5612254e61617be34f 10.0.0.173:6380@16380 master - 0 1615208495942 2 connected 10922-16383
6bff3eff842c1fc2283768685334ecd9e43a101d 10.0.0.172:6381@16381 master - 0 1615208497953 4 connected
5b7c99c80b8f471ecaeed654fe389e5e2f7e302f 10.0.0.173:6381@16381 master - 0 1615208497000 3 connected
a15f99462b5ede942a795ef98c0630e9b60e0f27 10.0.0.171:6381@16381 master - 0 1615208495000 5 connected
dbc61856ac2cfb6a865cd95548d3c66a38f9f947 10.0.0.171:6380@16380 myself,master - 0 1615208494000 0 connected 0-5460
[root@db01 ~]# redis-cli -h db01 -p 6380 CLUSTER INFO
cluster_state:ok 查看状态 显示ok 就正常了
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:5
cluster_my_epoch:0
cluster_stats_messages_ping_sent:252
cluster_stats_messages_pong_sent:273
cluster_stats_messages_meet_sent:6
cluster_stats_messages_sent:531
cluster_stats_messages_ping_received:272
cluster_stats_messages_pong_received:258
cluster_stats_messages_meet_received:1
cluster_stats_messages_received:531
7.4 设置主从关系 注意同一个主从不安排在一台主机上错开 从在前 主在后
redis-cli -h db01 -p 6381 CLUSTER REPLICATE 172的6380的ID
redis-cli -h db02 -p 6381 CLUSTER REPLICATE 173的6380的ID
redis-cli -h db03 -p 6381 CLUSTER REPLICATE 171的6380的ID
redis-cli -h db01 -p 6381 CLUSTER REPLICATE d0cd29af7ebf22483a9b2faadbeb09b12f2a09d2
redis-cli -h db02 -p 6381 CLUSTER REPLICATE 24ec035c46d24eac5da3be5612254e61617be34f
redis-cli -h db03 -p 6381 CLUSTER REPLICATE dbc61856ac2cfb6a865cd95548d3c66a38f9f947
[root@db01 ~]# redis-cli -h db01 -p 6380 CLUSTER NODES
d0cd29af7ebf22483a9b2faadbeb09b12f2a09d2 10.0.0.172:6380@16380 master - 0 1615208886788 1 connected 5461-10921
24ec035c46d24eac5da3be5612254e61617be34f 10.0.0.173:6380@16380 master - 0 1615208887000 2 connected 10922-16383
6bff3eff842c1fc2283768685334ecd9e43a101d 10.0.0.172:6381@16381 slave 24ec035c46d24eac5da3be5612254e61617be34f 0 1615208886000 4 connected
5b7c99c80b8f471ecaeed654fe389e5e2f7e302f 10.0.0.173:6381@16381 slave dbc61856ac2cfb6a865cd95548d3c66a38f9f947 0 1615208887793 3 connected
a15f99462b5ede942a795ef98c0630e9b60e0f27 10.0.0.171:6381@16381 slave d0cd29af7ebf22483a9b2faadbeb09b12f2a09d2 0 1615208888797 5 connected
dbc61856ac2cfb6a865cd95548d3c66a38f9f947 10.0.0.171:6380@16380 myself,master - 0 1615208884000 0 connected 0-5460
7.5 Redis Cluster ASK路由介绍
增加一个-c 会自动跳转到其他的分配节点插入数据 即使不是集群也可使用
redis-cli -c -h db01 -p 6381
测试数据的随机性
for i in {1..10000};do redis-cli -c -h db01 -p 6380 set k_${i} v_${i};echo ${i};done
[root@db01 ~]# redis-cli -h db01 -p 6380 dbsize
(integer) 3343
[root@db01 ~]# redis-cli -h db02 -p 6380 dbsize
(integer) 3314
[root@db01 ~]# redis-cli -h db03 -p 6380 dbsize
(integer) 3343
redis-cli -c -h db03 -p 6380 keys \* > keys_all.txt
cat keys_all.txt |awk -F "_" '{print $2}'|sort -rn
[root@db01 ~]# redis-cli --cluster info db01 6380 检查监控集群状态
db01:6380 (dbc61856...) -> 3343 keys | 5461 slots | 1 slaves.
10.0.0.172:6380 (d0cd29af...) -> 3314 keys | 5461 slots | 1 slaves.
10.0.0.173:6380 (24ec035c...) -> 3343 keys | 5462 slots | 1 slaves.
[OK] 10000 keys in 3 masters.
0.61 keys per slot on average.
试验 测试挂掉一个主节点6380 打开三个界面
for i in {1..100000};do redis-cli -c -h db01 -p 6380 set k_${i} v_${i};echo ${i};done
for i in {1..100000};do redis-cli -c -h db01 -p 6380 get k_${i};echo ${i};done
pkill redis db02 或者db03 执行
redis 集群短暂暂停后 恢复正常
启停脚本
vim redis_shell.sh
#!/bin/bash
# redis_shell.sh - start/stop/restart/login/ps/tail helper for a local
# redis instance laid out as /opt/redis_<PORT>/{conf,logs,pid}.
# Usage: sh redis_shell.sh {start|stop|restart|login|ps|tail} [PORT]
# PORT defaults to 6379 and must be numeric when supplied.
USAG(){
    echo "sh $0 {start|stop|restart|login|ps|tail} PORT"
}

# Argument parsing: action only -> default port; action + numeric port -> use it.
if [ "$#" = 1 ]
then
    REDIS_PORT='6379'
elif [ "$#" = 2 ] && [ -z "$(echo "$2" | sed 's#[0-9]##g')" ]
then
    REDIS_PORT="$2"
else
    USAG
    exit 1   # usage error: exit non-zero (original wrongly exited 0)
fi

REDIS_IP=$(hostname -I | awk '{print $1}')   # first local IP, used by redis-cli
PATH_CONF=/opt/redis_${REDIS_PORT}/conf/redis_${REDIS_PORT}.conf
PATH_LOG=/opt/redis_${REDIS_PORT}/logs/redis_${REDIS_PORT}.log

# Start the server from its config file (daemonize yes is set in the config).
CMD_START(){
    redis-server "${PATH_CONF}"
}

# Graceful stop: SHUTDOWN persists data (RDB save) before the process exits.
CMD_SHUTDOWN(){
    redis-cli -c -h "${REDIS_IP}" -p "${REDIS_PORT}" shutdown
}

# Interactive client; -c enables cluster redirects and is harmless standalone.
CMD_LOGIN(){
    redis-cli -c -h "${REDIS_IP}" -p "${REDIS_PORT}"
}

# List redis processes; the [r] trick keeps the grep itself out of the output.
CMD_PS(){
    ps -ef | grep '[r]edis'
}

CMD_TAIL(){
    tail -f "${PATH_LOG}"
}

case $1 in
    start)
        CMD_START
        CMD_PS
        ;;
    stop)
        CMD_SHUTDOWN
        CMD_PS
        ;;
    restart)
        # BUG FIX: the original ran CMD_START before CMD_SHUTDOWN, so a
        # "restart" left redis stopped. Stop the old instance first.
        CMD_SHUTDOWN
        CMD_START
        CMD_PS
        ;;
    login)
        CMD_LOGIN
        ;;
    ps)
        CMD_PS
        ;;
    tail)
        CMD_TAIL
        ;;
    *)
        USAG
esac
8.自动部署 集群
pkill redis
rm -rf /data/redis_6380/*
rm -rf /data/redis_6381/*
redis-server /opt/redis_6380/conf/redis_6380.conf
redis-server /opt/redis_6381/conf/redis_6381.conf
redis-cli --cluster create 10.0.0.171:6380 10.0.0.172:6380 10.0.0.173:6380 10.0.0.171:6381 10.0.0.172:6381 10.0.0.173:6381 --cluster-replicas 1
[root@db01 ~]# redis-cli -c -h db01 -p 6380
db01:6380> CLUSTER info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:36
cluster_stats_messages_pong_sent:43
cluster_stats_messages_sent:79
cluster_stats_messages_ping_received:38
cluster_stats_messages_pong_received:36
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:79
db01:6380> CLUSTER NODES
f299fbc44f40342f2fe61b70006c606a4c69aba5 10.0.0.173:6381@16381 slave 74696db11e1ebfb33516b1d3294c94888f98124b 0 1615250245000 6 connected
031beabec60a822f080ec3334dbbca962dd660d4 10.0.0.172:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615250242000 5 connected
74696db11e1ebfb33516b1d3294c94888f98124b 10.0.0.172:6380@16380 master - 0 1615250245304 2 connected 5461-10922
1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 10.0.0.171:6381@16381 slave 5640ed3ccb0d3c45c752cc0c64234ad367936f34 0 1615250244299 4 connected
67aaad935f7735e514a9315bc588c743937ec53c 10.0.0.171:6380@16380 myself,master - 0 1615250243000 1 connected 0-5460
5640ed3ccb0d3c45c752cc0c64234ad367936f34 10.0.0.173:6380@16380 master - 0 1615250246309 3 connected 10923-16383
如果主从不合理 需要使用 调整
redis-cli -h db01 -p 6381 cluster replicate 5640ed3ccb0d3c45c752cc0c64234ad367936f34
9 节点扩容
9.1 添加节点
db04节点
yum install -y rsync
rsync -avz root@10.0.0.171:/opt/* /opt/
cd /opt/redis
make install
mkdir -p /opt/redis_{6380,6381}/{conf,logs,pid}
mkdir -p /data/redis_{6380,6381}
cat >/opt/redis_6381/conf/redis_6381.conf <<EOF
daemonize yes
bind $(ifconfig eth0|awk 'NR==2{print $2}')
port 6381
pidfile "/opt/redis_6381/pid/redis_6381.pid"
logfile "/opt/redis_6381/logs/redis_6381.log"
databases 16
dbfilename "redis_6381.rdb"
dir "/data/redis_6381"
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
cluster-enabled yes
cluster-config-file nodes_6381.conf
cluster-node-timeout 15000
EOF
cat >/opt/redis_6380/conf/redis_6380.conf <<EOF
daemonize yes
bind $(ifconfig eth0|awk 'NR==2{print $2}')
port 6380
pidfile "/opt/redis_6380/pid/redis_6380.pid"
logfile "/opt/redis_6380/logs/redis_6380.log"
databases 16
dbfilename "redis_6380.rdb"
dir "/data/redis_6380"
appendonly yes
appendfilename "redis.aof"
appendfsync everysec
cluster-enabled yes
cluster-config-file nodes_6380.conf
cluster-node-timeout 15000
EOF
redis-server /opt/redis_6380/conf/redis_6380.conf
redis-server /opt/redis_6381/conf/redis_6381.conf
9.2 集群中添加节点
redis-cli -c -h db01 -p 6380 cluster meet 10.0.0.174 6380
redis-cli -c -h db01 -p 6380 cluster meet 10.0.0.174 6381
或者 推荐下面一种
redis-cli --cluster add-node 10.0.0.174:6380 10.0.0.171:6380
redis-cli --cluster add-node 10.0.0.174:6381 10.0.0.171:6380
db01:6380> CLUSTER NODES
f299fbc44f40342f2fe61b70006c606a4c69aba5 10.0.0.173:6381@16381 slave 74696db11e1ebfb33516b1d3294c94888f98124b 0 1615260465000 6 connected
031beabec60a822f080ec3334dbbca962dd660d4 10.0.0.172:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615260465000 5 connected
74696db11e1ebfb33516b1d3294c94888f98124b 10.0.0.172:6380@16380 master - 0 1615260465000 2 connected 5461-10922
eea6c5607187e067caf79b879b4f1d0a1f479108 10.0.0.174:6381@16381 master - 0 1615260465000 0 connected
ca5364b95ac9de14d2aa999ffe43dfd91835f075 10.0.0.174:6380@16380 master - 0 1615260465000 0 connected
1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 10.0.0.171:6381@16381 slave 5640ed3ccb0d3c45c752cc0c64234ad367936f34 0 1615260465117 4 connected
67aaad935f7735e514a9315bc588c743937ec53c 10.0.0.171:6380@16380 myself,master - 0 1615260464000 1 connected 0-5460
5640ed3ccb0d3c45c752cc0c64234ad367936f34 10.0.0.173:6380@16380 master - 0 1615260466122 3 connected 10923-16383
9.3 集群中调整槽位
redis-cli --cluster reshard 10.0.0.171 6380
---4096 16384/4=4096 (4个节点)
---ca5364b95ac9de14d2aa999ffe43dfd91835f075 调整到那个redis节点上10.0.0.174:6380
---all 从那个槽点调整选择all
---yes 是否调整
db01:6380> CLUSTER nodes
f299fbc44f40342f2fe61b70006c606a4c69aba5 10.0.0.173:6381@16381 slave 74696db11e1ebfb33516b1d3294c94888f98124b 0 1615261289000 6 connected
031beabec60a822f080ec3334dbbca962dd660d4 10.0.0.172:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615261290000 5 connected
74696db11e1ebfb33516b1d3294c94888f98124b 10.0.0.172:6380@16380 master - 0 1615261290328 2 connected 6827-10922
eea6c5607187e067caf79b879b4f1d0a1f479108 10.0.0.174:6381@16381 master - 0 1615261288318 0 connected
ca5364b95ac9de14d2aa999ffe43dfd91835f075 10.0.0.174:6380@16380 master - 0 1615261290000 7 connected 0-1364 5461-6826 10923-12287
1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 10.0.0.171:6381@16381 slave 5640ed3ccb0d3c45c752cc0c64234ad367936f34 0 1615261290000 4 connected
67aaad935f7735e514a9315bc588c743937ec53c 10.0.0.171:6380@16380 myself,master - 0 1615261288000 1 connected 1365-5460
5640ed3ccb0d3c45c752cc0c64234ad367936f34 10.0.0.173:6380@16380 master - 0 1615261291331 3 connected 12288-16383
9.4 集群中调整主从关系 注意从在前 主在后
redis-cli -h db01 -p 6381 CLUSTER REPLICATE 172的6380的ID
redis-cli -h db02 -p 6381 CLUSTER REPLICATE 173的6380的ID
redis-cli -h db03 -p 6381 CLUSTER REPLICATE 174的6380的ID
redis-cli -h db04 -p 6381 CLUSTER REPLICATE 171的6380的ID
[root@db01 ~]# redis-cli -h db03 -p 6381 CLUSTER REPLICATE ca5364b95ac9de14d2aa999ffe43dfd91835f075
OK
[root@db01 ~]# redis-cli -h db04 -p 6381 CLUSTER REPLICATE 67aaad935f7735e514a9315bc588c743937ec53c
OK
[root@db01 ~]# redis-cli -h db01 -p 6380 CLUSTER nodes
f299fbc44f40342f2fe61b70006c606a4c69aba5 10.0.0.173:6381@16381 slave ca5364b95ac9de14d2aa999ffe43dfd91835f075 0 1615261690000 7 connected
031beabec60a822f080ec3334dbbca962dd660d4 10.0.0.172:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615261689000 5 connected
74696db11e1ebfb33516b1d3294c94888f98124b 10.0.0.172:6380@16380 master - 0 1615261691000 2 connected 6827-10922
eea6c5607187e067caf79b879b4f1d0a1f479108 10.0.0.174:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615261691331 1 connected
ca5364b95ac9de14d2aa999ffe43dfd91835f075 10.0.0.174:6380@16380 master - 0 1615261692336 7 connected 0-1364 5461-6826 10923-12287
1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 10.0.0.171:6381@16381 slave 5640ed3ccb0d3c45c752cc0c64234ad367936f34 0 1615261688000 4 connected
67aaad935f7735e514a9315bc588c743937ec53c 10.0.0.171:6380@16380 myself,master - 0 1615261689000 1 connected 1365-5460
5640ed3ccb0d3c45c752cc0c64234ad367936f34 10.0.0.173:6380@16380 master - 0 1615261690325 3 connected 12288-16383
10 节点缩容
10.1 移动槽位 关闭主节点
redis-cli --cluster reshard 10.0.0.171 6380
---1365 原来第4个节点4096 4096/3=1365
---1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 调整那个节点 10.0.0.171:6380
---ca5364b95ac9de14d2aa999ffe43dfd91835f075 从那个槽位调整 10.0.0.174:6380
---done 接收
---yes 是否同意迁移
[root@db01 ~]# redis-cli -h db01 -p 6380 CLUSTER nodes
f299fbc44f40342f2fe61b70006c606a4c69aba5 10.0.0.173:6381@16381 slave ca5364b95ac9de14d2aa999ffe43dfd91835f075 0 1615262351917 7 connected
031beabec60a822f080ec3334dbbca962dd660d4 10.0.0.172:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615262351000 8 connected
74696db11e1ebfb33516b1d3294c94888f98124b 10.0.0.172:6380@16380 master - 0 1615262350000 2 connected 6827-10922
eea6c5607187e067caf79b879b4f1d0a1f479108 10.0.0.174:6381@16381 slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615262349908 8 connected
ca5364b95ac9de14d2aa999ffe43dfd91835f075 10.0.0.174:6380@16380 master - 0 1615262350000 7 connected 5461-6826 10923-12287
1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 10.0.0.171:6381@16381 slave 5640ed3ccb0d3c45c752cc0c64234ad367936f34 0 1615262349000 4 connected
67aaad935f7735e514a9315bc588c743937ec53c 10.0.0.171:6380@16380 myself,master - 0 1615262352000 8 connected 0-5460
5640ed3ccb0d3c45c752cc0c64234ad367936f34 10.0.0.173:6380@16380 master - 0 1615262352922 3 connected 12288-16383
redis-cli --cluster reshard 10.0.0.171 6380
---1365 原来第4个节点4096 4096/3=1365
---74696db11e1ebfb33516b1d3294c94888f98124b 调整那个节点 10.0.0.172:6380
---ca5364b95ac9de14d2aa999ffe43dfd91835f075 从那个槽位调整 10.0.0.174:6380
---done 接收
---yes 是否同意迁移
redis-cli --cluster reshard 10.0.0.171 6380
M: ca5364b95ac9de14d2aa999ffe43dfd91835f075 10.0.0.174:6380
slots:[6826],[10923-12287] (1366 slots) master
---1366 看提示还有多少个
---5640ed3ccb0d3c45c752cc0c64234ad367936f34 调整那个节点 10.0.0.173:6380
---ca5364b95ac9de14d2aa999ffe43dfd91835f075 从那个槽位调整 10.0.0.174:6380
---done 接收
---yes 是否同意迁移
[root@db01 ~]# redis-cli --cluster check 10.0.0.174:6380 查看槽位情况
10.0.0.174:6380 (ca5364b9...) -> 0 keys | 0 slots | 0 slaves.
10.0.0.173:6380 (5640ed3c...) -> 0 keys | 5462 slots | 2 slaves.
10.0.0.171:6380 (67aaad93...) -> 0 keys | 5461 slots | 2 slaves.
10.0.0.172:6380 (74696db1...) -> 0 keys | 5461 slots | 0 slaves.
redis-cli --cluster del-node ip端口 ID 将节点从集群中删除
[root@db01 ~]# redis-cli --cluster del-node 10.0.0.174:6380 ca5364b95ac9de14d2aa999ffe43dfd91835f075
>>> Removing node ca5364b95ac9de14d2aa999ffe43dfd91835f075 from cluster 10.0.0.174:6380
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
10.2 设置主从关系 关闭从节点
redis-cli -h db01 -p 6381 CLUSTER REPLICATE 172的6380的ID
redis-cli -h db02 -p 6381 CLUSTER REPLICATE 173的6380的ID
redis-cli -h db03 -p 6381 CLUSTER REPLICATE 171的6380的ID
redis-cli -h db01 -p 6381 CLUSTER REPLICATE 74696db11e1ebfb33516b1d3294c94888f98124b
redis-cli -h db02 -p 6381 CLUSTER REPLICATE 5640ed3ccb0d3c45c752cc0c64234ad367936f34
redis-cli -h db03 -p 6381 CLUSTER REPLICATE 67aaad935f7735e514a9315bc588c743937ec53c
[root@db01 ~]# redis-cli --cluster del-node 10.0.0.174:6381 eea6c5607187e067caf79b879b4f1d0a1f479108 关闭从节点
>>> Removing node eea6c5607187e067caf79b879b4f1d0a1f479108 from cluster 10.0.0.174:6381
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
[root@db01 ~]# redis-cli -h db03 -p 6381 cluster nodes
74696db11e1ebfb33516b1d3294c94888f98124b 10.0.0.172:6380@16380 master - 0 1615270771611 9 connected 5461-6825 6827-10922
1bc82eb26a628fa4083c6eb6c638b5b3b087f7c3 10.0.0.171:6381@16381 slave 74696db11e1ebfb33516b1d3294c94888f98124b 0 1615270772615 10 connected
5640ed3ccb0d3c45c752cc0c64234ad367936f34 10.0.0.173:6380@16380 master - 0 1615270770605 10 connected 6826 10923-16383
f299fbc44f40342f2fe61b70006c606a4c69aba5 10.0.0.173:6381@16381 myself,slave 67aaad935f7735e514a9315bc588c743937ec53c 0 1615270771000 6 connected
031beabec60a822f080ec3334dbbca962dd660d4 10.0.0.172:6381@16381 slave 5640ed3ccb0d3c45c752cc0c64234ad367936f34 0 1615270771000 10 connected
67aaad935f7735e514a9315bc588c743937ec53c 10.0.0.171:6380@16380 master - 0 1615270769601 8 connected 0-5460
11. 集群工具
11.1 单节点复制集群中
单节点6379导入集群中 6380 单节点数据会删除
redis-cli --cluster import 10.0.0.171:6380 --cluster-from 10.0.0.171:6379
单节点6379导入集群中 6380 单节点数据会保留
redis-cli --cluster import 10.0.0.171:6380 --cluster-copy --cluster-from 10.0.0.171:6379
添加replace参数会覆盖掉同名的数据,对新集群新增加的数据不受影响
redis-cli --cluster import 10.0.0.171:6380 --cluster-copy --cluster-replace --cluster-from 10.0.0.171:6379
实验:
验证迁移期间边写边导会不会影响: 同时开2个终端,一个写入key
for i in {1..1000};do redis-cli set k_${i} v_${i};sleep 0.2;echo ${i};done
一个执行导入命令
redis-cli --cluster import 10.0.0.171:6380 --cluster-copy --cluster-replace --cluster-from 10.0.0.171:6379
只会导入当你执行导入命令那一刻时,当前被导入节点的所有数据,类似于快照,对于后面再写入的数据不会更新
11.2 找出redis中key很大的值
redis-cli -h db01 -p 2360 --bigkeys 官方自带的
https://github.com/sripathikrishnan/redis-rdb-tools
yum install python-pip gcc python-devel -y
cd /opt/
git clone https://github.com/sripathikrishnan/redis-rdb-tools
cd redis-rdb-tools
pip install python-lzf
python setup.py install
生成数据 保存文件
redis-cli -h db01 -p 6379 set txt $(cat txt.txt)
redis-cli -h db01 -p 6379 BGSAVE
分析数据
cd /data/redis_6379/
rdb -c memory redis_6379.rdb -f redis_6379.rdb.csv
查找大的值
awk -F"," '{print $4,$3}' redis_6379.rdb.csv |sort -r
11.3 redis 内存
config set maxmemory 2G 设置redis最大内存
redis 内存策略
1.noeviction 默认策略,不会删除任何数据,拒绝所有写入操作并返回客户端错误信息,此时只响应读操作
2.volatile-lru 根据LRU算法删除设置了超时属性的key,直到腾出足够空间为止,如果没有可删除的key,则退回到noeviction策略
3.allkeys-lru 根据LRU算法删除key,不管数据有没有设置超时属性
4.allkeys-random 随机删除所有key
5.volatile-random 随机删除过期key
6.volatile-ttl 根据key的ttl,删除最近要过期的key
先空出来系统一半内存
48G 一共
24G 系统
24G redis
redis先给8G内存 满了之后,分析结果告诉老大和开发,让他们排查一下是否所有的key都是必须的
redis再给到12G内存 满了之后,分析结果告诉老大和开发,让他们排查一下是否所有的key都是必须的
redis再给到16G内存 满了之后,分析结果告诉老大和开发,让他们排查一下是否所有的key都是必须的
等到24G都用完了之后,汇报领导,要考虑买内存了。
等到35G的时候,就要考虑是加内存,还是扩容机器。
11.4 redis 集群相关命令
redis-cli -h db01 -p 6380
CLUSTER NODES
CLUSTER MEET 10.0.0.52 6380
CLUSTER INFO
CLUSTER REPLICATE
CLUSTER ADDSLOTS
CLUSTER RESET
CLUSTER FAILOVER 故障转移
CLUSTER SETSLOT <slot> STABLE
redis-cli --cluster info 10.0.0.171 6380 集群状态
redis-cli --cluster rebalance 10.0.0.171 6380 查看平衡
redis-cli --cluster del-node 删除节点
redis-cli --cluster fix 10.0.0.171:6380 修复节点
[root@db01 redis-rdb-tools]# redis-benchmark -h db01 -p 6380 -n 10000 -q 性能测试
PING_INLINE: 104166.66 requests per second
PING_BULK: 112359.55 requests per second
SET: 114942.53 requests per second
GET: 114942.53 requests per second
INCR: 113636.37 requests per second
LPUSH: 100000.00 requests per second
RPUSH: 100000.00 requests per second
LPOP: 102040.82 requests per second
RPOP: 101010.10 requests per second
SADD: 113636.37 requests per second
HSET: 113636.37 requests per second
SPOP: 104166.66 requests per second
LPUSH (needed to benchmark LRANGE): 99009.90 requests per second
LRANGE_100 (first 100 elements): 51020.41 requests per second
LRANGE_300 (first 300 elements): 25773.20 requests per second
LRANGE_500 (first 450 elements): 20080.32 requests per second
LRANGE_600 (first 600 elements): 16207.46 requests per second
MSET (10 keys): 116279.06 requests per second
网友评论