mirror of https://github.com/mainflux/mainflux.git synced 2025-05-01 13:48:56 +08:00
b1ackd0t abc1e3d858
MF-703 - Reliably Publish Event Messages to Redis (#1836)
* Reliably Publish Event Messages to Redis

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Make Redis Producer Safe For Concurrent Use

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Combine Redis Publishing

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Add defer statement

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Use Channel Instead of Array

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Adding `occurred_at`

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Fix Check `occurred_at`

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Remove Unused Const

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Check For Non NIL Error on Publishing

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Add More Tests

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Handle When Channel Is Full

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Fix Issue After Rebase

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

* Fix Tests

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>

---------

Signed-off-by: rodneyosodo <blackd0t@protonmail.com>
2023-07-31 17:03:45 +02:00

// Copyright (c) Mainflux
// SPDX-License-Identifier: Apache-2.0

package redis

import (
	"context"
	"sync"
	"time"

	"github.com/go-redis/redis/v8"
)

const (
	unpublishedEventsCheckInterval        = 1 * time.Minute
	redisConnCheckInterval                = 100 * time.Millisecond
	maxUnpublishedEvents           uint64 = 1e6
)

// Event represents redis event.
type Event interface {
	// Encode encodes event to map.
	Encode() (map[string]interface{}, error)
}
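
// A minimal sketch of an Event implementation (illustrative only; the type and
// field names below are assumptions, not part of the upstream code):
//
//	type thingEvent struct {
//		id        string
//		operation string
//	}
//
//	func (e thingEvent) Encode() (map[string]interface{}, error) {
//		return map[string]interface{}{
//			"id":        e.id,
//			"operation": e.operation,
//		}, nil
//	}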

// Publisher specifies redis event publishing API.
type Publisher interface {
	// Publish publishes event to redis stream.
	Publish(ctx context.Context, event Event) error

	// StartPublishingRoutine starts routine that checks for unpublished events
	// and publishes them to redis stream.
	StartPublishingRoutine(ctx context.Context)
}

type eventStore struct {
	client            *redis.Client
	unpublishedEvents chan *redis.XAddArgs
	streamID          string
	streamLen         int64
	mu                sync.Mutex
}

// NewEventStore returns a Publisher that writes events to a redis stream and
// buffers them in memory while redis is unreachable.
func NewEventStore(client *redis.Client, streamID string, streamLen int64) Publisher {
	return &eventStore{
		client:            client,
		unpublishedEvents: make(chan *redis.XAddArgs, maxUnpublishedEvents),
		streamID:          streamID,
		streamLen:         streamLen,
	}
}

func (es *eventStore) Publish(ctx context.Context, event Event) error {
	values, err := event.Encode()
	if err != nil {
		return err
	}
	values["occurred_at"] = time.Now().UnixNano()

	record := &redis.XAddArgs{
		Stream:       es.streamID,
		MaxLenApprox: es.streamLen,
		Values:       values,
	}

	if err := es.checkRedisConnection(ctx); err != nil {
		es.mu.Lock()
		defer es.mu.Unlock()

		select {
		case es.unpublishedEvents <- record:
		default:
			// If the channel is full (rarely happens), drop the event.
			return nil
		}

		return nil
	}

	return es.client.XAdd(ctx, record).Err()
}

func (es *eventStore) StartPublishingRoutine(ctx context.Context) {
	defer close(es.unpublishedEvents)

	ticker := time.NewTicker(unpublishedEventsCheckInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if err := es.checkRedisConnection(ctx); err == nil {
				es.mu.Lock()
				for i := len(es.unpublishedEvents) - 1; i >= 0; i-- {
					record := <-es.unpublishedEvents
					if err := es.client.XAdd(ctx, record).Err(); err != nil {
						es.unpublishedEvents <- record
						break
					}
				}
				es.mu.Unlock()
			}
		case <-ctx.Done():
			return
		}
	}
}

func (es *eventStore) checkRedisConnection(ctx context.Context) error {
	// A timeout is used to avoid blocking the main thread.
	ctx, cancel := context.WithTimeout(ctx, redisConnCheckInterval)
	defer cancel()

	return es.client.Ping(ctx).Err()
}
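
For orientation, here is a minimal usage sketch. It assumes a file living in the same package as the store above; the Redis address, stream name, stream length cap, and the thingEvent type are illustrative assumptions rather than part of the upstream code.

package redis

import (
	"context"
	"log"

	"github.com/go-redis/redis/v8"
)

// thingEvent is the hypothetical Event implementation from the sketch above.
type thingEvent struct {
	id        string
	operation string
}

func (e thingEvent) Encode() (map[string]interface{}, error) {
	return map[string]interface{}{
		"id":        e.id,
		"operation": e.operation,
	}, nil
}

func exampleUsage() {
	// Illustrative connection settings and stream parameters.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	es := NewEventStore(client, "mainflux.things", 1000)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Periodically republish events buffered while redis was unreachable.
	go es.StartPublishingRoutine(ctx)

	if err := es.Publish(ctx, thingEvent{id: "123", operation: "thing.create"}); err != nil {
		log.Printf("failed to publish event: %s", err)
	}
}

Because Publish falls back to the buffered channel whenever the connection check fails, callers need no retry logic of their own; the background routine retries buffered events once a minute.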