feat: check expiration time when replaying pending messages

wfm 2023-05-12 17:23:21 +08:00
parent 711a2763ed
commit 494928ed88

@@ -7,17 +7,14 @@ import lombok.AllArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.redisson.api.RLock;
 import org.redisson.api.RedissonClient;
-import org.springframework.data.redis.connection.stream.Consumer;
-import org.springframework.data.redis.connection.stream.MapRecord;
-import org.springframework.data.redis.connection.stream.PendingMessagesSummary;
-import org.springframework.data.redis.connection.stream.ReadOffset;
-import org.springframework.data.redis.connection.stream.StreamOffset;
-import org.springframework.data.redis.connection.stream.StreamRecords;
+import org.springframework.data.domain.Range;
+import org.springframework.data.redis.connection.stream.*;
 import org.springframework.data.redis.core.StreamOperations;
 import org.springframework.scheduling.annotation.Scheduled;
 
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * This job handles messages that a consumer left unconsumed after a crash
@@ -33,6 +30,8 @@ public class RedisPendingMessageResendJob {
     private final String groupName;
     private final RedissonClient redissonClient;
 
+    private final long expireTime = 1000 * 60;
+
     /**
      * Runs once a minute; executing at second 35 of each minute avoids the problem of too many jobs firing exactly on the minute
      */
@@ -54,25 +53,28 @@
     private void execute() {
         StreamOperations<String, Object, Object> ops = redisTemplate.getRedisTemplate().opsForStream();
         listeners.forEach(listener -> {
-            PendingMessagesSummary pendingMessagesSummary = ops.pending(listener.getStreamKey(), groupName);
+            PendingMessagesSummary pendingMessagesSummary = Objects.requireNonNull(ops.pending(listener.getStreamKey(), groupName));
             // number of pending messages per consumer
             Map<String, Long> pendingMessagesPerConsumer = pendingMessagesSummary.getPendingMessagesPerConsumer();
             pendingMessagesPerConsumer.forEach((consumerName, pendingMessageCount) -> {
                 log.info("[processPendingMessage][consumer({}) message count({})]", consumerName, pendingMessageCount);
-                // read messages from this consumer's pending queue
-                List<MapRecord<String, Object, Object>> records = ops.read(Consumer.from(groupName, consumerName), StreamOffset.create(listener.getStreamKey(), ReadOffset.from("0")));
-                if (CollUtil.isEmpty(records)) {
+                PendingMessages pendingMessages = ops.pending(listener.getStreamKey(), Consumer.from(groupName, consumerName), Range.unbounded(), pendingMessageCount);
+                if (pendingMessages.isEmpty()) {
                     return;
                 }
-                for (MapRecord<String, Object, Object> record : records) {
-                    // re-deliver the message
-                    redisTemplate.getRedisTemplate().opsForStream().add(StreamRecords.newRecord()
-                            .ofObject(record.getValue()) // set the payload
-                            .withStreamKey(listener.getStreamKey()));
-                    // ack the message as consumed
-                    redisTemplate.getRedisTemplate().opsForStream().acknowledge(groupName, record);
+                for (PendingMessage pendingMessage : pendingMessages) {
+                    if (pendingMessage.getElapsedTimeSinceLastDelivery().toMillis() - expireTime >= 0) {
+                        List<MapRecord<String, Object, Object>> records = ops.range(listener.getStreamKey(), Range.of(Range.Bound.inclusive(pendingMessage.getIdAsString()), Range.Bound.inclusive(pendingMessage.getIdAsString())));
+                        if (!CollUtil.isEmpty(records)) {
+                            // re-deliver the message
+                            redisTemplate.getRedisTemplate().opsForStream().add(StreamRecords.newRecord()
+                                    .ofObject(records.get(0).getValue()) // set the payload
+                                    .withStreamKey(listener.getStreamKey()));
+                            // ack the message as consumed
+                            redisTemplate.getRedisTemplate().opsForStream().acknowledge(groupName, records.get(0));
+                        }
+                    }
                 }
             });
         });
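For context, the replay rule introduced by this commit can be read in isolation as follows. This is a minimal sketch of the same Spring Data Redis pending-message APIs, not the project's actual class: the streamKey, groupName, consumerName, fetch count, and 60-second expireTime below are hypothetical stand-ins for values the real job derives from its listeners and configuration.

import java.time.Duration;
import java.util.List;
import org.springframework.data.domain.Range;
import org.springframework.data.redis.connection.stream.Consumer;
import org.springframework.data.redis.connection.stream.MapRecord;
import org.springframework.data.redis.connection.stream.PendingMessage;
import org.springframework.data.redis.connection.stream.PendingMessages;
import org.springframework.data.redis.connection.stream.StreamRecords;
import org.springframework.data.redis.core.StreamOperations;

public class PendingMessageReplaySketch {

    // Hypothetical names for illustration only.
    private static final String STREAM_KEY = "demo:stream";
    private static final String GROUP_NAME = "demo-group";
    private static final String CONSUMER_NAME = "crashed-consumer";
    private static final long EXPIRE_TIME_MILLIS = 60_000;

    /**
     * Re-delivers pending messages whose last delivery is older than EXPIRE_TIME_MILLIS.
     */
    static void replayExpired(StreamOperations<String, Object, Object> ops) {
        // Fetch up to 100 pending entries for one consumer of the group.
        PendingMessages pending = ops.pending(STREAM_KEY,
                Consumer.from(GROUP_NAME, CONSUMER_NAME), Range.unbounded(), 100);
        for (PendingMessage message : pending) {
            Duration idle = message.getElapsedTimeSinceLastDelivery();
            if (idle.toMillis() < EXPIRE_TIME_MILLIS) {
                continue; // delivered recently, not yet considered expired
            }
            // Look the entry up by its id so the original payload can be re-appended.
            List<MapRecord<String, Object, Object>> records = ops.range(STREAM_KEY,
                    Range.closed(message.getIdAsString(), message.getIdAsString()));
            if (records == null || records.isEmpty()) {
                continue;
            }
            // Re-deliver as a fresh stream entry, then ack the stuck pending entry.
            ops.add(StreamRecords.newRecord()
                    .ofObject(records.get(0).getValue())
                    .withStreamKey(STREAM_KEY));
            ops.acknowledge(GROUP_NAME, records.get(0));
        }
    }
}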