mirror of
https://github.com/mainflux/mainflux.git
synced 2025-04-29 13:49:28 +08:00

* Use message time as Point time in InfluxDB Signed-off-by: Dusan Borovcanin <dusan.borovcanin@mainflux.com> * Use actual message time Update all Reader and Writer services to use time from the message instead of time given from the corrseponding Writer service. Signed-off-by: Dusan Borovcanin <dusan.borovcanin@mainflux.com> * Remove message time check Messages time less than 2**28 represent time relative to the current time so Writer used to convert this to the correct value, i.e. msg.Time += time.Now(). However, this step is optional and should really be a part of the app on top of Mainflux or could be introduced with minor changes in Normalizer, Reader or Writer services, so there is no need for this to be supported out of box. Signed-off-by: Dusan Borovcanin <dusan.borovcanin@mainflux.com> * Use channel and publisher as tag keys Move all the other Message fields to the field keys. Signed-off-by: Dusan Borovcanin <dusan.borovcanin@mainflux.com>
110 lines
2.5 KiB
Go
110 lines
2.5 KiB
Go
//
// Copyright (c) 2018
// Mainflux
//
// SPDX-License-Identifier: Apache-2.0
//
|
|
package cassandra_test
|
|
|
|
import (
|
|
"fmt"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/mainflux/mainflux"
|
|
readers "github.com/mainflux/mainflux/readers/cassandra"
|
|
writers "github.com/mainflux/mainflux/writers/cassandra"
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
const (
	keyspace    = "mainflux" // Cassandra keyspace both the reader and writer operate on.
	chanID      = 1          // Channel ID every test message is published to.
	msgsNum     = 42         // Total number of messages written before reading back.
	valueFields = 6          // Number of value/value-sum variants cycled through per message.
)
|
|
|
|
var (
	// addr is the Cassandra node the tests connect to.
	addr = "localhost"

	// msg is a template message; the test loop mutates its Value,
	// ValueSum and Time fields before each Save call.
	msg = mainflux.Message{
		Channel:   chanID,
		Publisher: 1,
		Protocol:  "mqtt",
	}
)
|
|
|
|
func TestReadAll(t *testing.T) {
|
|
session, err := readers.Connect([]string{addr}, keyspace)
|
|
require.Nil(t, err, fmt.Sprintf("failed to connect to Cassandra: %s", err))
|
|
defer session.Close()
|
|
writer := writers.New(session)
|
|
|
|
messages := []mainflux.Message{}
|
|
now := time.Now().Unix()
|
|
for i := 0; i < msgsNum; i++ {
|
|
// Mix possible values as well as value sum.
|
|
count := i % valueFields
|
|
switch count {
|
|
case 0:
|
|
msg.Value = &mainflux.Message_FloatValue{5}
|
|
case 1:
|
|
msg.Value = &mainflux.Message_BoolValue{false}
|
|
case 2:
|
|
msg.Value = &mainflux.Message_StringValue{"value"}
|
|
case 3:
|
|
msg.Value = &mainflux.Message_DataValue{"base64data"}
|
|
case 4:
|
|
msg.ValueSum = nil
|
|
case 5:
|
|
msg.ValueSum = &mainflux.SumValue{Value: 45}
|
|
}
|
|
msg.Time = float64(now + int64(i))
|
|
|
|
err := writer.Save(msg)
|
|
require.Nil(t, err, fmt.Sprintf("failed to store message to Cassandra: %s", err))
|
|
messages = append(messages, msg)
|
|
}
|
|
|
|
reader := readers.New(session)
|
|
|
|
// Since messages are not saved in natural order,
|
|
// cases that return subset of messages are only
|
|
// checking data result set size, but not content.
|
|
cases := map[string]struct {
|
|
chanID uint64
|
|
offset uint64
|
|
limit uint64
|
|
messages []mainflux.Message
|
|
}{
|
|
"read message page for existing channel": {
|
|
chanID: chanID,
|
|
offset: 0,
|
|
limit: msgsNum,
|
|
messages: messages,
|
|
},
|
|
"read message page for non-existent channel": {
|
|
chanID: 2,
|
|
offset: 0,
|
|
limit: msgsNum,
|
|
messages: []mainflux.Message{},
|
|
},
|
|
"read message last page": {
|
|
chanID: chanID,
|
|
offset: 40,
|
|
limit: 5,
|
|
messages: messages[40:42],
|
|
},
|
|
}
|
|
|
|
for desc, tc := range cases {
|
|
result := reader.ReadAll(tc.chanID, tc.offset, tc.limit)
|
|
if tc.offset > 0 {
|
|
assert.Equal(t, len(tc.messages), len(result), fmt.Sprintf("%s: expected %d messages, got %d", desc, len(tc.messages), len(result)))
|
|
continue
|
|
}
|
|
assert.ElementsMatch(t, tc.messages, result, fmt.Sprintf("%s: expected %v got %v", desc, tc.messages, result))
|
|
}
|
|
}
|