feat: store v3 (#2431)

Simon-Pierre Vivier 2024-04-25 09:09:52 -04:00 committed by GitHub
parent 7f8d8e806c
commit 0b0fbfad5c
GPG Key ID: B5690EEEBB952194
55 changed files with 5646 additions and 1613 deletions
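
A note on the shapes being introduced: the hunks below replace the legacy HistoryQuery/HistoryResponse types with StoreQueryRequest/StoreQueryResponse. The following is a minimal sketch of those types reconstructed from the field names used in the tests (the authoritative definitions live in waku/waku_store/common, whose large diff is suppressed on this page; the width of statusCode and the exported `*` markers are assumptions):

import std/options
# WakuMessageHash, WakuMessage, PubsubTopic, ContentTopic, Timestamp and
# PagingDirection come from waku/waku_core and waku/common/paging.

type
  WakuMessageKeyValue* = object
    messageHash*: WakuMessageHash # responses pair each message with its hash
    message*: WakuMessage

  StoreQueryRequest* = object
    requestId*: string # stamped by the client; tests blank it for equality
    includeData*: bool
    pubsubTopic*: Option[PubsubTopic]
    contentTopics*: seq[ContentTopic]
    startTime*: Option[Timestamp]
    endTime*: Option[Timestamp]
    messageHashes*: seq[WakuMessageHash]
    paginationCursor*: Option[WakuMessageHash] # replaces HistoryCursor
    paginationForward*: PagingDirection # replaces `direction`
    paginationLimit*: Option[uint64] # replaces `pageSize`

  StoreQueryResponse* = object
    requestId*: string
    statusCode*: uint32 # HTTP-style; 200, 400 and 429 appear in the tests
    statusDesc*: string
    messages*: seq[WakuMessageKeyValue]
    paginationCursor*: Option[WakuMessageHash]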

View File

@ -38,8 +38,8 @@ import
../../waku/waku_lightpush/common,
../../waku/waku_lightpush/rpc,
../../waku/waku_enr,
../../waku/waku_store,
../../waku/discovery/waku_dnsdisc,
../../waku/waku_store_legacy,
../../waku/waku_node,
../../waku/node/waku_metrics,
../../waku/node/peer_manager,
@ -469,7 +469,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
# We have a viable storenode. Let's query it for historical messages.
echo "Connecting to storenode: " & $(storenode.get())
node.mountStoreClient()
node.mountLegacyStoreClient()
node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec)
proc storeHandler(response: HistoryResponse) {.gcsafe.} =

View File

@ -33,9 +33,16 @@ import
./waku_store/test_waku_store,
./waku_store/test_wakunode_store
# Waku legacy store test suite
import
./waku_store_legacy/test_client,
./waku_store_legacy/test_rpc_codec,
./waku_store_legacy/test_waku_store,
./waku_store_legacy/test_wakunode_store
when defined(waku_exp_store_resume):
# TODO: Review store resume test cases (#1282)
import ./waku_store/test_resume
import ./waku_store_legacy/test_resume
import
./node/test_all,

View File

@ -2,4 +2,5 @@ import
./test_wakunode_filter,
./test_wakunode_lightpush,
./test_wakunode_peer_exchange,
./test_wakunode_store
./test_wakunode_store,
./test_wakunode_legacy_store

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
import chronos
import ../../../waku/[waku_core/message, waku_store]
import ../../../waku/[waku_core/message, waku_store, waku_store_legacy]
const
FUTURE_TIMEOUT* = 1.seconds
@ -13,8 +13,11 @@ proc newPushHandlerFuture*(): Future[(string, WakuMessage)] =
proc newBoolFuture*(): Future[bool] =
newFuture[bool]()
proc newHistoryFuture*(): Future[HistoryQuery] =
newFuture[HistoryQuery]()
proc newHistoryFuture*(): Future[StoreQueryRequest] =
newFuture[StoreQueryRequest]()
proc newLegacyHistoryFuture*(): Future[waku_store_legacy.HistoryQuery] =
newFuture[waku_store_legacy.HistoryQuery]()
proc toResult*[T](future: Future[T]): Result[T, string] =
if future.cancelled():

View File

@ -17,11 +17,13 @@ proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
let
message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
topic = "test-pubsub-topic"
cursor = Index(
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
pubsubTopic: "test-pubsub-topic",
pubsubTopic: topic,
hash: computeMessageHash(topic, message),
)
(cursor, message)

View File

@ -1,8 +1,10 @@
{.used.}
import std/times, stew/byteutils, testutils/unittests, nimcrypto
import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto
import ../../../waku/waku_core, ../../../waku/waku_archive/driver/queue_driver/index
var rng = initRand()
## Helpers
proc getTestTimestamp(offset = 0): Timestamp =
@ -19,6 +21,15 @@ proc hashFromStr(input: string): MDigest[256] =
return hashed
proc randomHash(): WakuMessageHash =
var hash: WakuMessageHash
for i in 0 ..< hash.len:
let numb: byte = byte(rng.next())
hash[i] = numb
hash
suite "Queue Driver - index":
## Test vars
let
@ -26,67 +37,79 @@ suite "Queue Driver - index":
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
smallIndex2 = Index(
digest: hashFromStr("1234567"), # digest is less significant than senderTime
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
largeIndex1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(9000),
hash: randomHash(),
) # only senderTime differ from smallIndex1
largeIndex2 = Index(
digest: hashFromStr("12345"), # only digest differs from smallIndex1
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
eqIndex1 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
eqIndex2 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
eqIndex3 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(9999),
# receiverTime difference should have no effect on comparisons
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
diffPsTopic = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(1100),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime2 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(10000),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime3 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(1200),
senderTime: getNanosecondTime(0),
pubsubTopic: "aaaa",
hash: randomHash(),
)
noSenderTime4 = Index(
digest: hashFromStr("0"),
receiverTime: getNanosecondTime(1200),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
test "Index comparison":

View File

@ -24,6 +24,7 @@ proc getTestQueueDriver(numMessages: int): QueueDriver =
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
hash: computeMessageHash(DefaultPubsubTopic, msg),
)
discard testQueueDriver.add(index, msg)

View File

@ -7,7 +7,7 @@ import
../testlib/[common, wakucore]
proc newTestWakuStore*(
switch: Switch, handler: HistoryQueryHandler
switch: Switch, handler: StoreQueryRequestHandler
): Future[WakuStore] {.async.} =
let
peerManager = PeerManager.new(switch)
@ -21,13 +21,3 @@ proc newTestWakuStore*(
proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient =
let peerManager = PeerManager.new(switch)
WakuStoreClient.new(peerManager, rng)
proc computeHistoryCursor*(
pubsubTopic: PubsubTopic, message: WakuMessage
): HistoryCursor =
HistoryCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: waku_store.computeDigest(message),
)
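
The computeHistoryCursor helper is deleted here because store v3 has no separate cursor record: the pagination cursor is simply the hash of the last message of a page, as the pagination tests further down confirm (cursors[0] == some(kvs[6].messageHash)). A minimal sketch of resuming a query, assuming the v3 names used in these tests:

let cursor: WakuMessageHash = computeMessageHash(DefaultPubsubTopic, message)
let nextPage = StoreQueryRequest(
  contentTopics: @[DefaultContentTopic],
  paginationCursor: some(cursor), # resume after this message
  paginationForward: PagingDirection.FORWARD,
)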

View File

@ -1,8 +1,3 @@
{.used.}
import
./test_client,
./test_resume,
./test_rpc_codec,
./test_waku_store,
./test_wakunode_store
import ./test_client, ./test_rpc_codec, ./test_waku_store, ./test_wakunode_store

View File

@ -12,10 +12,13 @@ suite "Store Client":
var message1 {.threadvar.}: WakuMessage
var message2 {.threadvar.}: WakuMessage
var message3 {.threadvar.}: WakuMessage
var messageSeq {.threadvar.}: seq[WakuMessage]
var handlerFuture {.threadvar.}: Future[HistoryQuery]
var handler {.threadvar.}: HistoryQueryHandler
var historyQuery {.threadvar.}: HistoryQuery
var hash1 {.threadvar.}: WakuMessageHash
var hash2 {.threadvar.}: WakuMessageHash
var hash3 {.threadvar.}: WakuMessageHash
var messageSeq {.threadvar.}: seq[WakuMessageKeyValue]
var handlerFuture {.threadvar.}: Future[StoreQueryRequest]
var handler {.threadvar.}: StoreQueryRequestHandler
var storeQuery {.threadvar.}: StoreQueryRequest
var serverSwitch {.threadvar.}: Switch
var clientSwitch {.threadvar.}: Switch
@ -30,15 +33,25 @@ suite "Store Client":
message1 = fakeWakuMessage(contentTopic = DefaultContentTopic)
message2 = fakeWakuMessage(contentTopic = DefaultContentTopic)
message3 = fakeWakuMessage(contentTopic = DefaultContentTopic)
messageSeq = @[message1, message2, message3]
hash1 = computeMessageHash(DefaultPubsubTopic, message1)
hash2 = computeMessageHash(DefaultPubsubTopic, message2)
hash3 = computeMessageHash(DefaultPubsubTopic, message3)
messageSeq =
@[
WakuMessageKeyValue(messageHash: hash1, message: message1),
WakuMessageKeyValue(messageHash: hash2, message: message2),
WakuMessageKeyValue(messageHash: hash3, message: message3),
]
handlerFuture = newHistoryFuture()
handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} =
handlerFuture.complete(req)
return ok(HistoryResponse(messages: messageSeq))
historyQuery = HistoryQuery(
handler = proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.} =
var request = req
request.requestId = ""
handlerFuture.complete(request)
return ok(StoreQueryResponse(messages: messageSeq))
storeQuery = StoreQueryRequest(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
paginationForward: PagingDirection.FORWARD,
)
serverSwitch = newTestSwitch()
@ -55,15 +68,15 @@ suite "Store Client":
asyncTeardown:
await allFutures(serverSwitch.stop(), clientSwitch.stop())
suite "HistoryQuery Creation and Execution":
suite "StoreQueryRequest Creation and Execution":
asyncTest "Valid Queries":
# When a valid query is sent to the server
let queryResponse = await client.query(historyQuery, peer = serverPeerInfo)
let queryResponse = await client.query(storeQuery, peer = serverPeerInfo)
# Then the query is processed successfully
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == historyQuery
handlerFuture.read() == storeQuery
queryResponse.get().messages == messageSeq
asyncTest "Invalid Queries":
@ -73,33 +86,33 @@ suite "Store Client":
# Given some invalid queries
let
invalidQuery1 = HistoryQuery(
invalidQuery1 = StoreQueryRequest(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[],
direction: PagingDirection.FORWARD,
paginationForward: PagingDirection.FORWARD,
)
invalidQuery2 = HistoryQuery(
invalidQuery2 = StoreQueryRequest(
pubsubTopic: PubsubTopic.none(),
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
paginationForward: PagingDirection.FORWARD,
)
invalidQuery3 = HistoryQuery(
invalidQuery3 = StoreQueryRequest(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
pageSize: 0,
paginationLimit: some(uint64(0)),
)
invalidQuery4 = HistoryQuery(
invalidQuery4 = StoreQueryRequest(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
pageSize: 0,
paginationLimit: some(uint64(0)),
)
invalidQuery5 = HistoryQuery(
invalidQuery5 = StoreQueryRequest(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(0.Timestamp),
endTime: some(0.Timestamp),
)
invalidQuery6 = HistoryQuery(
invalidQuery6 = StoreQueryRequest(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(0.Timestamp),
@ -165,15 +178,15 @@ suite "Store Client":
handlerFuture.read() == invalidQuery6
queryResponse6.get().messages == messageSeq
suite "Verification of HistoryResponse Payload":
suite "Verification of StoreQueryResponse Payload":
asyncTest "Positive Responses":
# When a valid query is sent to the server
let queryResponse = await client.query(historyQuery, peer = serverPeerInfo)
let queryResponse = await client.query(storeQuery, peer = serverPeerInfo)
# Then the query is processed successfully, and is of the expected type
check:
await handlerFuture.withTimeout(FUTURE_TIMEOUT)
type(queryResponse.get()) is HistoryResponse
type(queryResponse.get()) is StoreQueryResponse
asyncTest "Negative Responses - PeerDialFailure":
# Given a stopped peer
@ -182,10 +195,10 @@ suite "Store Client":
otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo()
# When a query is sent to the stopped peer
let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo)
let queryResponse = await client.query(storeQuery, peer = otherServerPeerInfo)
# Then the query is not processed
check:
not await handlerFuture.withTimeout(FUTURE_TIMEOUT)
queryResponse.isErr()
queryResponse.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE
queryResponse.error.kind == ErrorCode.PEER_DIAL_FAILURE
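
Note how error handling splits in v3: only transport-level failures such as a failed peer dial surface as a Result error, while server-side rejections travel inside a successful response as statusCode/statusDesc. A hedged sketch of client-side handling, assuming client.query returns a StoreQueryResult as in this test:

let res = await client.query(storeQuery, peer = serverPeerInfo)
if res.isErr():
  # transport failure, e.g. res.error.kind == ErrorCode.PEER_DIAL_FAILURE
  echo "store query failed: ", res.error.kind
else:
  let response = res.get()
  if response.statusCode != 200:
    # server-side rejection, e.g. 400 "BAD_REQUEST: invalid cursor"
    echo "rejected: ", response.statusCode, " ", response.statusDesc
  else:
    for kv in response.messages:
      echo kv.messageHash # each entry pairs a message with its hash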

View File

@ -5,113 +5,29 @@ import
../../../waku/common/protobuf,
../../../waku/common/paging,
../../../waku/waku_core,
../../../waku/waku_store/rpc,
../../../waku/waku_store/common,
../../../waku/waku_store/rpc_codec,
../testlib/common,
../testlib/wakucore
procSuite "Waku Store - RPC codec":
test "PagingIndexRPC protobuf codec":
test "StoreQueryRequest protobuf codec":
## Given
let index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
let query = StoreQueryRequest(
requestId: "0",
includeData: false,
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(Timestamp(10)),
endTime: some(Timestamp(11)),
messageHashes: @[],
paginationCursor: none(WakuMessageHash),
paginationForward: PagingDirection.FORWARD,
paginationLimit: some(DefaultPageSize),
)
## When
let encodedIndex = index.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# The fields of decodedIndex must be the same as the original index
decodedIndex == index
test "PagingIndexRPC protobuf codec - empty index":
## Given
let emptyIndex = PagingIndexRPC()
let encodedIndex = emptyIndex.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# Check the correctness of init and encode for an empty PagingIndexRPC
decodedIndex == emptyIndex
test "PagingInfoRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.FORWARD),
)
## When
let pb = pagingInfo.encode()
let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedPagingInfo.isOk()
check:
# The fields of decodedPagingInfo must be the same as the original pagingInfo
decodedPagingInfo.value == pagingInfo
decodedPagingInfo.value.direction == pagingInfo.direction
test "PagingInfoRPC protobuf codec - empty paging info":
## Given
let emptyPagingInfo = PagingInfoRPC()
## When
let pb = emptyPagingInfo.encode()
let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedEmptyPagingInfo.isOk()
check:
# check the correctness of init and encode for an empty PagingInfoRPC
decodedEmptyPagingInfo.value == emptyPagingInfo
test "HistoryQueryRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
query = HistoryQueryRPC(
contentFilters:
@[
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
],
pagingInfo: some(pagingInfo),
startTime: some(Timestamp(10)),
endTime: some(Timestamp(11)),
)
## When
let pb = query.encode()
let decodedQuery = HistoryQueryRPC.decode(pb.buffer)
let decodedQuery = StoreQueryRequest.decode(pb.buffer)
## Then
check:
@ -121,13 +37,13 @@ procSuite "Waku Store - RPC codec":
# the fields of decoded query decodedQuery must be the same as the original query query
decodedQuery.value == query
test "HistoryQueryRPC protobuf codec - empty history query":
test "StoreQueryRequest protobuf codec - empty history query":
## Given
let emptyQuery = HistoryQueryRPC()
let emptyQuery = StoreQueryRequest()
## When
let pb = emptyQuery.encode()
let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer)
let decodedEmptyQuery = StoreQueryRequest.decode(pb.buffer)
## Then
check:
@ -137,27 +53,23 @@ procSuite "Waku Store - RPC codec":
# check the correctness of init and encode for an empty HistoryQueryRPC
decodedEmptyQuery.value == emptyQuery
test "HistoryResponseRPC protobuf codec":
test "StoreQueryResponse protobuf codec":
## Given
let
message = fakeWakuMessage()
index = PagingIndexRPC.compute(
message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
res = HistoryResponseRPC(
messages: @[message],
pagingInfo: some(pagingInfo),
error: HistoryResponseErrorRPC.INVALID_CURSOR,
hash = computeMessageHash(DefaultPubsubTopic, message)
keyValue = WakuMessageKeyValue(messageHash: hash, message: message)
res = StoreQueryResponse(
requestId: "1",
statusCode: 200,
statusDesc: "it's fine",
messages: @[keyValue],
paginationCursor: none(WakuMessageHash),
)
## When
let pb = res.encode()
let decodedRes = HistoryResponseRPC.decode(pb.buffer)
let decodedRes = StoreQueryResponse.decode(pb.buffer)
## Then
check:
@ -167,13 +79,13 @@ procSuite "Waku Store - RPC codec":
# the fields of decoded response decodedRes must be the same as the original response res
decodedRes.value == res
test "HistoryResponseRPC protobuf codec - empty history response":
test "StoreQueryResponse protobuf codec - empty history response":
## Given
let emptyRes = HistoryResponseRPC()
let emptyRes = StoreQueryResponse()
## When
let pb = emptyRes.encode()
let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer)
let decodedEmptyRes = StoreQueryResponse.decode(pb.buffer)
## Then
check:

View File

@ -3,8 +3,15 @@
import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
import
../../../waku/
[common/paging, node/peer_manager, waku_core, waku_store, waku_store/client],
../../../waku/[
common/paging,
node/peer_manager,
waku_core,
waku_core/message/digest,
waku_store,
waku_store/client,
waku_store/common,
],
../testlib/[common, wakucore],
./store_utils
@ -21,21 +28,25 @@ suite "Waku Store - query handler":
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
let msg = fakeWakuMessage(contentTopic = DefaultContentTopic)
let hash = computeMessageHash(DefaultPubsubTopic, msg)
let kv = WakuMessageKeyValue(messageHash: hash, message: msg)
var queryHandlerFut = newFuture[(HistoryQuery)]()
var queryHandlerFut = newFuture[(StoreQueryRequest)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return ok(HistoryResponse(messages: @[msg]))
req: StoreQueryRequest
): Future[StoreQueryResult] {.async, gcsafe.} =
var request = req
request.requestId = "" # Must remove the id for equality
queryHandlerFut.complete(request)
return ok(StoreQueryResponse(messages: @[kv]))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD
let req = StoreQueryRequest(
contentTopics: @[DefaultContentTopic], paginationForward: PagingDirection.FORWARD
)
## When
@ -53,7 +64,7 @@ suite "Waku Store - query handler":
let response = queryRes.tryGet()
check:
response.messages.len == 1
response.messages == @[msg]
response.messages == @[kv]
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())
@ -69,19 +80,21 @@ suite "Waku Store - query handler":
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
var queryHandlerFut = newFuture[(HistoryQuery)]()
var queryHandlerFut = newFuture[(StoreQueryRequest)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST))
req: StoreQueryRequest
): Future[StoreQueryResult] {.async, gcsafe.} =
var request = req
request.requestId = "" # Must remove the id for equality
queryHandlerFut.complete(request)
return err(StoreError(kind: ErrorCode.BAD_REQUEST))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD
let req = StoreQueryRequest(
contentTopics: @[DefaultContentTopic], paginationForward: PagingDirection.FORWARD
)
## When
@ -98,7 +111,7 @@ suite "Waku Store - query handler":
let error = queryRes.tryError()
check:
error.kind == HistoryErrorKind.BAD_REQUEST
error.kind == ErrorCode.BAD_REQUEST
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())
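
Server-side, the mounted handler changes from HistoryQueryHandler to StoreQueryRequestHandler, and because the client now stamps every request with a random requestId, these tests blank the id before completing the future so equality against the original request still holds. A sketch of a v3 handler under the same assumptions (an archive lookup would normally replace the empty page):

proc queryHandler(req: StoreQueryRequest): Future[StoreQueryResult] {.async.} =
  if req.contentTopics.len == 0 and req.pubsubTopic.isNone():
    # typed error; the client observes it as error.kind == ErrorCode.BAD_REQUEST
    return err(StoreError(kind: ErrorCode.BAD_REQUEST))
  return ok(StoreQueryResponse(statusCode: 200, statusDesc: "OK"))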

View File

@ -1,6 +1,7 @@
{.used.}
import
std/sequtils,
stew/shims/net as stewNet,
testutils/unittests,
chronicles,
@ -13,7 +14,6 @@ import
libp2p/protocols/pubsub/pubsub,
libp2p/protocols/pubsub/gossipsub
import
../../../waku/common/databases/db_sqlite,
../../../waku/common/paging,
../../../waku/waku_core,
../../../waku/waku_core/message/digest,
@ -27,7 +27,6 @@ import
../../../waku/waku_node,
../waku_store/store_utils,
../waku_archive/archive_utils,
../testlib/common,
../testlib/wakucore,
../testlib/wakunode
@ -48,14 +47,21 @@ procSuite "WakuNode - Store":
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let hashes = msgListA.mapIt(computeMessageHash(DefaultPubsubTopic, it))
let kvs =
zip(hashes, msgListA).mapIt(WakuMessageKeyValue(messageHash: it[0], message: it[1]))
let archiveA = block:
let driver = newSqliteArchiveDriver()
for msg in msgListA:
let msg_digest = waku_archive.computeDigest(msg)
let msg_hash = computeMessageHash(DefaultPubsubTopic, msg)
for kv in kvs:
let msg_digest = computeDigest(kv.message)
require (
waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp)
waitFor driver.put(
DefaultPubsubTopic, kv.message, msg_digest, kv.messageHash,
kv.message.timestamp,
)
).isOk()
driver
@ -78,7 +84,7 @@ procSuite "WakuNode - Store":
client.mountStoreClient()
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic])
let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic])
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
@ -89,7 +95,7 @@ procSuite "WakuNode - Store":
let response = queryRes.get()
check:
response.messages == msgListA
response.messages == kvs
# Cleanup
waitFor allFutures(client.stop(), server.stop())
@ -112,18 +118,18 @@ procSuite "WakuNode - Store":
client.mountStoreClient()
## Given
let req = HistoryQuery(
let req = StoreQueryRequest(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.FORWARD,
paginationForward: PagingDirection.FORWARD,
paginationLimit: some(uint64(7)),
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
var pages = newSeq[seq[WakuMessageKeyValue]](2)
var cursors = newSeq[Option[WakuMessageHash]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
@ -132,19 +138,19 @@ procSuite "WakuNode - Store":
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
cursors[i] = response.paginationCursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
nextReq.paginationCursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6]))
cursors[1] == none(HistoryCursor)
cursors[0] == some(kvs[6].messageHash)
cursors[1] == none(WakuMessageHash)
check:
pages[0] == msgListA[0 .. 6]
pages[1] == msgListA[7 .. 9]
pages[0] == kvs[0 .. 6]
pages[1] == kvs[7 .. 9]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
@ -167,18 +173,18 @@ procSuite "WakuNode - Store":
client.mountStoreClient()
## Given
let req = HistoryQuery(
let req = StoreQueryRequest(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.BACKWARD,
paginationLimit: some(uint64(7)),
paginationForward: PagingDirection.BACKWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
var pages = newSeq[seq[WakuMessageKeyValue]](2)
var cursors = newSeq[Option[WakuMessageHash]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
@ -187,19 +193,19 @@ procSuite "WakuNode - Store":
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
cursors[i] = response.paginationCursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
nextReq.paginationCursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3]))
cursors[1] == none(HistoryCursor)
cursors[0] == some(kvs[3].messageHash)
cursors[1] == none(WakuMessageHash)
check:
pages[0] == msgListA[3 .. 9]
pages[1] == msgListA[0 .. 2]
pages[0] == kvs[3 .. 9]
pages[1] == kvs[0 .. 2]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
@ -230,6 +236,7 @@ procSuite "WakuNode - Store":
## Given
let message = fakeWakuMessage()
let hash = computeMessageHash(DefaultPubSubTopic, message)
let
serverPeer = server.peerInfo.toRemotePeerInfo()
filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo()
@ -254,9 +261,8 @@ procSuite "WakuNode - Store":
# Wait for the server filter to receive the push message
require waitFor filterFut.withTimeout(5.seconds)
let res = waitFor client.query(
HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer
)
let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic])
let res = waitFor client.query(req, serverPeer)
## Then
check res.isOk()
@ -264,7 +270,7 @@ procSuite "WakuNode - Store":
let response = res.get()
check:
response.messages.len == 1
response.messages[0] == message
response.messages[0] == WakuMessageKeyValue(messageHash: hash, message: message)
let (handledPubsubTopic, handledMsg) = filterFut.read()
check:
@ -292,28 +298,27 @@ procSuite "WakuNode - Store":
client.mountStoreClient()
## Forcing a bad cursor with empty digest data
var data: array[32, byte] = [
var cursor: WakuMessageHash = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
]
let cursor = HistoryCursor(
pubsubTopic: "pubsubTopic",
senderTime: now(),
storeTime: now(),
digest: waku_archive.MessageDigest(data: data),
)
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor))
let req = StoreQueryRequest(
contentTopics: @[DefaultContentTopic], paginationCursor: some(cursor)
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check not queryRes.isOk()
check queryRes.isOk()
check queryRes.error == "BAD_REQUEST: invalid cursor"
let response = queryRes.get()
check response.statusCode == 400
check response.statusDesc == "BAD_REQUEST: invalid cursor"
# Cleanup
waitFor allFutures(client.stop(), server.stop())
@ -336,7 +341,7 @@ procSuite "WakuNode - Store":
client.mountStoreClient()
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic])
let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic])
let serverPeer = server.peerInfo.toRemotePeerInfo()
let requestProc = proc() {.async.} =
@ -346,7 +351,7 @@ procSuite "WakuNode - Store":
let response = queryRes.get()
check:
response.messages == msgListA
response.messages.mapIt(it.message) == msgListA
for count in 0 ..< 4:
waitFor requestProc()
@ -379,23 +384,24 @@ procSuite "WakuNode - Store":
client.mountStoreClient()
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic])
let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic])
let serverPeer = server.peerInfo.toRemotePeerInfo()
let successProc = proc() {.async.} =
let queryRes = waitFor client.query(req, peer = serverPeer)
check queryRes.isOk()
let response = queryRes.get()
check:
response.messages == msgListA
response.messages.mapIt(it.message) == msgListA
let failsProc = proc() {.async.} =
let queryRes = waitFor client.query(req, peer = serverPeer)
check queryRes.isErr()
check queryRes.error == "TOO_MANY_REQUESTS"
check queryRes.isOk()
let response = queryRes.get()
check response.statusCode == 429
for count in 0 ..< 3:
waitFor successProc()
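
These last two tests capture another v3 behavioural change: rate limiting and invalid cursors no longer fail the Result; the transport call succeeds and the rejection arrives in the response (statusCode 400 with "BAD_REQUEST: invalid cursor", statusCode 429 when rate limited). A small dispatch sketch; status codes other than 200/400/429 are not shown in this diff:

let queryRes = waitFor client.query(req, peer = serverPeer)
assert queryRes.isOk() # transport succeeded even when the server rejects
let response = queryRes.get()
if response.statusCode == 200:
  discard response.messages # the requested page
elif response.statusCode == 400:
  echo response.statusDesc # e.g. "BAD_REQUEST: invalid cursor"
elif response.statusCode == 429:
  echo "rate limited, retry later"
else:
  echo "unexpected status: ", response.statusCode, " ", response.statusDesc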

View File

@ -0,0 +1,34 @@
{.used.}
import std/options, chronos, chronicles, libp2p/crypto/crypto
import
../../../waku/
[node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client],
../testlib/[common, wakucore]
proc newTestWakuStore*(
switch: Switch, handler: HistoryQueryHandler
): Future[WakuStore] {.async.} =
let
peerManager = PeerManager.new(switch)
proto = WakuStore.new(peerManager, rng, handler)
await proto.start()
switch.mount(proto)
return proto
proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient =
let peerManager = PeerManager.new(switch)
WakuStoreClient.new(peerManager, rng)
proc computeHistoryCursor*(
pubsubTopic: PubsubTopic, message: WakuMessage
): HistoryCursor =
HistoryCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: computeDigest(message),
)

View File

@ -0,0 +1,8 @@
{.used.}
import
./test_client,
./test_resume,
./test_rpc_codec,
./test_waku_store,
./test_wakunode_store

View File

@ -0,0 +1,196 @@
{.used.}
import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
import
../../../waku/[
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
common/paging,
],
../testlib/[common, wakucore, testasync, futures],
./store_utils
suite "Store Client":
var message1 {.threadvar.}: WakuMessage
var message2 {.threadvar.}: WakuMessage
var message3 {.threadvar.}: WakuMessage
var messageSeq {.threadvar.}: seq[WakuMessage]
var handlerFuture {.threadvar.}: Future[HistoryQuery]
var handler {.threadvar.}: HistoryQueryHandler
var historyQuery {.threadvar.}: HistoryQuery
var serverSwitch {.threadvar.}: Switch
var clientSwitch {.threadvar.}: Switch
var server {.threadvar.}: WakuStore
var client {.threadvar.}: WakuStoreClient
var serverPeerInfo {.threadvar.}: RemotePeerInfo
var clientPeerInfo {.threadvar.}: RemotePeerInfo
asyncSetup:
message1 = fakeWakuMessage(contentTopic = DefaultContentTopic)
message2 = fakeWakuMessage(contentTopic = DefaultContentTopic)
message3 = fakeWakuMessage(contentTopic = DefaultContentTopic)
messageSeq = @[message1, message2, message3]
handlerFuture = newLegacyHistoryFuture()
handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} =
handlerFuture.complete(req)
return ok(HistoryResponse(messages: messageSeq))
historyQuery = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
)
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
server = await newTestWakuStore(serverSwitch, handler = handler)
client = newTestWakuStoreClient(clientSwitch)
await allFutures(serverSwitch.start(), clientSwitch.start())
serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo()
asyncTeardown:
await allFutures(serverSwitch.stop(), clientSwitch.stop())
suite "HistoryQuery Creation and Execution":
asyncTest "Valid Queries":
# When a valid query is sent to the server
let queryResponse = await client.query(historyQuery, peer = serverPeerInfo)
# Then the query is processed successfully
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == historyQuery
queryResponse.get().messages == messageSeq
asyncTest "Invalid Queries":
# TODO: IMPROVE: We can't test "actual" invalid queries because
# it directly depends on the handler implementation, to achieve
# proper coverage we'd need an example implementation.
# Given some invalid queries
let
invalidQuery1 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[],
direction: PagingDirection.FORWARD,
)
invalidQuery2 = HistoryQuery(
pubsubTopic: PubsubTopic.none(),
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
)
invalidQuery3 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
pageSize: 0,
)
invalidQuery4 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
pageSize: 0,
)
invalidQuery5 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(0.Timestamp),
endTime: some(0.Timestamp),
)
invalidQuery6 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(0.Timestamp),
endTime: some(-1.Timestamp),
)
# When the query is sent to the server
let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo)
# Then the query is not processed
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery1
queryResponse1.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo)
# Then the query is not processed
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery2
queryResponse2.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo)
# Then the query is not processed
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery3
queryResponse3.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo)
# Then the query is not processed
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery4
queryResponse4.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo)
# Then the query is not processed
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery5
queryResponse5.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo)
# Then the query is not processed
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery6
queryResponse6.get().messages == messageSeq
suite "Verification of HistoryResponse Payload":
asyncTest "Positive Responses":
# When a valid query is sent to the server
let queryResponse = await client.query(historyQuery, peer = serverPeerInfo)
# Then the query is processed successfully, and is of the expected type
check:
await handlerFuture.withTimeout(FUTURE_TIMEOUT)
type(queryResponse.get()) is HistoryResponse
asyncTest "Negative Responses - PeerDialFailure":
# Given a stopped peer
let
otherServerSwitch = newTestSwitch()
otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo()
# When a query is sent to the stopped peer
let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo)
# Then the query is not processed
check:
not await handlerFuture.withTimeout(FUTURE_TIMEOUT)
queryResponse.isErr()
queryResponse.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE

View File

@ -13,8 +13,8 @@ import
../../waku/node/peer_manager,
../../waku/waku_core,
../../waku/waku_core/message/digest,
../../waku/waku_store,
../waku_store/store_utils,
../../waku/waku_store_legacy,
../waku_store_legacy/store_utils,
../waku_archive/archive_utils,
./testlib/common,
./testlib/switch

View File

@ -0,0 +1,184 @@
{.used.}
import std/options, testutils/unittests, chronos
import
../../../waku/common/protobuf,
../../../waku/common/paging,
../../../waku/waku_core,
../../../waku/waku_store_legacy/rpc,
../../../waku/waku_store_legacy/rpc_codec,
../testlib/common,
../testlib/wakucore
procSuite "Waku Store - RPC codec":
test "PagingIndexRPC protobuf codec":
## Given
let index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
## When
let encodedIndex = index.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# The fields of decodedIndex must be the same as the original index
decodedIndex == index
test "PagingIndexRPC protobuf codec - empty index":
## Given
let emptyIndex = PagingIndexRPC()
let encodedIndex = emptyIndex.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# Check the correctness of init and encode for an empty PagingIndexRPC
decodedIndex == emptyIndex
test "PagingInfoRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.FORWARD),
)
## When
let pb = pagingInfo.encode()
let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedPagingInfo.isOk()
check:
# The fields of decodedPagingInfo must be the same as the original pagingInfo
decodedPagingInfo.value == pagingInfo
decodedPagingInfo.value.direction == pagingInfo.direction
test "PagingInfoRPC protobuf codec - empty paging info":
## Given
let emptyPagingInfo = PagingInfoRPC()
## When
let pb = emptyPagingInfo.encode()
let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedEmptyPagingInfo.isOk()
check:
# check the correctness of init and encode for an empty PagingInfoRPC
decodedEmptyPagingInfo.value == emptyPagingInfo
test "HistoryQueryRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
query = HistoryQueryRPC(
contentFilters:
@[
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
],
pagingInfo: some(pagingInfo),
startTime: some(Timestamp(10)),
endTime: some(Timestamp(11)),
)
## When
let pb = query.encode()
let decodedQuery = HistoryQueryRPC.decode(pb.buffer)
## Then
check:
decodedQuery.isOk()
check:
# the fields of decoded query decodedQuery must be the same as the original query query
decodedQuery.value == query
test "HistoryQueryRPC protobuf codec - empty history query":
## Given
let emptyQuery = HistoryQueryRPC()
## When
let pb = emptyQuery.encode()
let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer)
## Then
check:
decodedEmptyQuery.isOk()
check:
# check the correctness of init and encode for an empty HistoryQueryRPC
decodedEmptyQuery.value == emptyQuery
test "HistoryResponseRPC protobuf codec":
## Given
let
message = fakeWakuMessage()
index = PagingIndexRPC.compute(
message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
res = HistoryResponseRPC(
messages: @[message],
pagingInfo: some(pagingInfo),
error: HistoryResponseErrorRPC.INVALID_CURSOR,
)
## When
let pb = res.encode()
let decodedRes = HistoryResponseRPC.decode(pb.buffer)
## Then
check:
decodedRes.isOk()
check:
# the fields of decoded response decodedRes must be the same as the original response res
decodedRes.value == res
test "HistoryResponseRPC protobuf codec - empty history response":
## Given
let emptyRes = HistoryResponseRPC()
## When
let pb = emptyRes.encode()
let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer)
## Then
check:
decodedEmptyRes.isOk()
check:
# check the correctness of init and encode for an empty HistoryResponseRPC
decodedEmptyRes.value == emptyRes

View File

@ -0,0 +1,109 @@
{.used.}
import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
import
../../../waku/[
common/paging,
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
],
../testlib/[common, wakucore],
./store_utils
suite "Waku Store - query handler":
asyncTest "history query handler should be called":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
let msg = fakeWakuMessage(contentTopic = DefaultContentTopic)
var queryHandlerFut = newFuture[(HistoryQuery)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return ok(HistoryResponse(messages: @[msg]))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD
)
## When
let queryRes = await client.query(req, peer = serverPeerInfo)
## Then
check:
not queryHandlerFut.failed()
queryRes.isOk()
let request = queryHandlerFut.read()
check:
request == req
let response = queryRes.tryGet()
check:
response.messages.len == 1
response.messages == @[msg]
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())
asyncTest "history query handler should be called and return an error":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
var queryHandlerFut = newFuture[(HistoryQuery)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD
)
## When
let queryRes = await client.query(req, peer = serverPeerInfo)
## Then
check:
not queryHandlerFut.failed()
queryRes.isErr()
let request = queryHandlerFut.read()
check:
request == req
let error = queryRes.tryError()
check:
error.kind == HistoryErrorKind.BAD_REQUEST
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())

View File

@ -0,0 +1,320 @@
{.used.}
import
stew/shims/net as stewNet,
testutils/unittests,
chronicles,
chronos,
libp2p/crypto/crypto,
libp2p/peerid,
libp2p/multiaddress,
libp2p/switch,
libp2p/protocols/pubsub/rpc/messages,
libp2p/protocols/pubsub/pubsub,
libp2p/protocols/pubsub/gossipsub
import
../../../waku/common/databases/db_sqlite,
../../../waku/common/paging,
../../../waku/waku_core,
../../../waku/waku_core/message/digest,
../../../waku/waku_core/subscription,
../../../waku/node/peer_manager,
../../../waku/waku_archive,
../../../waku/waku_archive/driver/sqlite_driver,
../../../waku/waku_filter_v2,
../../../waku/waku_filter_v2/client,
../../../waku/waku_store_legacy,
../../../waku/waku_node,
../waku_store_legacy/store_utils,
../waku_archive/archive_utils,
../testlib/common,
../testlib/wakucore,
../testlib/wakunode
procSuite "WakuNode - Store":
## Fixtures
let timeOrigin = now()
let msgListA =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let archiveA = block:
let driver = newSqliteArchiveDriver()
for msg in msgListA:
let msg_digest = waku_archive.computeDigest(msg)
let msg_hash = computeMessageHash(DefaultPubsubTopic, msg)
require (
waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp)
).isOk()
driver
test "Store protocol returns expected messages":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic])
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check queryRes.isOk()
let response = queryRes.get()
check:
response.messages == msgListA
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store node history response - forward pagination":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.FORWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6]))
cursors[1] == none(HistoryCursor)
check:
pages[0] == msgListA[0 .. 6]
pages[1] == msgListA[7 .. 9]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store node history response - backward pagination":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.BACKWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3]))
cursors[1] == none(HistoryCursor)
check:
pages[0] == msgListA[3 .. 9]
pages[1] == msgListA[0 .. 2]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store protocol returns expected message when relay is disabled and filter enabled":
## See nwaku issue #937: 'Store: ability to decouple store from relay'
## Setup
let
filterSourceKey = generateSecp256k1Key()
filterSource =
newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0))
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start(), filterSource.start())
waitFor filterSource.mountFilter()
let driver = newSqliteArchiveDriver()
let mountArchiveRes = server.mountArchive(driver)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
waitFor server.mountFilterClient()
client.mountLegacyStoreClient()
## Given
let message = fakeWakuMessage()
let
serverPeer = server.peerInfo.toRemotePeerInfo()
filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo()
## Then
let filterFut = newFuture[(PubsubTopic, WakuMessage)]()
proc filterHandler(
pubsubTopic: PubsubTopic, msg: WakuMessage
) {.async, gcsafe, closure.} =
await server.wakuArchive.handleMessage(pubsubTopic, msg)
filterFut.complete((pubsubTopic, msg))
server.wakuFilterClient.registerPushHandler(filterHandler)
let resp = waitFor server.filterSubscribe(
some(DefaultPubsubTopic), DefaultContentTopic, peer = filterSourcePeer
)
waitFor sleepAsync(100.millis)
waitFor filterSource.wakuFilter.handleMessage(DefaultPubsubTopic, message)
# Wait for the server filter to receive the push message
require waitFor filterFut.withTimeout(5.seconds)
let res = waitFor client.query(
HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer
)
## Then
check res.isOk()
let response = res.get()
check:
response.messages.len == 1
response.messages[0] == message
let (handledPubsubTopic, handledMsg) = filterFut.read()
check:
handledPubsubTopic == DefaultPubsubTopic
handledMsg == message
## Cleanup
waitFor allFutures(client.stop(), server.stop(), filterSource.stop())
test "history query should return INVALID_CURSOR if the cursor has empty data in the request":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Forcing a bad cursor with empty digest data
var data: array[32, byte] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
]
let cursor = HistoryCursor(
pubsubTopic: "pubsubTopic",
senderTime: now(),
storeTime: now(),
digest: waku_archive.MessageDigest(data: data),
)
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor))
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check not queryRes.isOk()
check queryRes.error ==
"legacy store client query error: BAD_REQUEST: invalid cursor"
# Cleanup
waitFor allFutures(client.stop(), server.stop())

View File

@ -26,7 +26,6 @@ import
../../../waku/waku_archive/driver/queue_driver,
../../../waku/waku_store as waku_store,
../../../waku/common/base64,
../testlib/common,
../testlib/wakucore,
../testlib/wakunode
@ -37,7 +36,7 @@ proc put(
store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[Result[void, string]] =
let
digest = waku_archive.computeDigest(message)
digest = computeDigest(message)
msgHash = computeMessageHash(pubsubTopic, message)
receivedTime =
if message.timestamp > 0:
@ -60,25 +59,30 @@ proc testWakuNode(): WakuNode =
################################################################################
# Beginning of the tests
################################################################################
procSuite "Waku v2 Rest API - Store":
asyncTest "MessageDigest <-> string conversions":
# Validate MessageDigest conversion from a WakuMessage obj
procSuite "Waku Rest API - Store v3":
asyncTest "MessageHash <-> string conversions":
# Validate MessageHash conversion from a WakuMessage obj
let wakuMsg = WakuMessage(
contentTopic: "Test content topic", payload: @[byte('H'), byte('i'), byte('!')]
)
let messageDigest = waku_store.computeDigest(wakuMsg)
let restMsgDigest = some(messageDigest.toRestStringMessageDigest())
let parsedMsgDigest = restMsgDigest.parseMsgDigest().value
let messageHash = computeMessageHash(DefaultPubsubTopic, wakuMsg)
let restMsgHash = some(messageHash.toRestStringWakuMessageHash())
let parsedMsgHashRes = parseHash(restMsgHash)
assert parsedMsgHashRes.isOk(), $parsedMsgHashRes.error
check:
messageDigest == parsedMsgDigest.get()
messageHash == parsedMsgHashRes.get().get()
# Random validation. Obtained the raw values manually
let expected = some("ZjNhM2Q2NDkwMTE0MjMzNDg0MzJlMDdiZGI3NzIwYTc%3D")
let msgDigest = expected.parseMsgDigest().value
let expected = some("f6za9OzG1xSiEZagZc2b3litRbkd3zRl61rezDd3pgQ%3D")
let msgHashRes = parseHash(expected)
assert msgHashRes.isOk(), $msgHashRes.error
check:
expected.get() == msgDigest.get().toRestStringMessageDigest()
expected.get() == msgHashRes.get().get().toRestStringWakuMessageHash()
asyncTest "Filter by start and end time":
let node = testWakuNode()
@ -127,17 +131,17 @@ procSuite "Waku v2 Rest API - Store":
let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId
# Apply filter by start and end timestamps
var response = await client.getStoreMessagesV1(
var response = await client.getStoreMessagesV3(
encodeUrl(fullAddr),
"true", # include data
encodeUrl(DefaultPubsubTopic),
"", # empty content topics. Don't filter by this field
"3", # start time
"6", # end time
"", # sender time
"", # store time
"", # base64-encoded digest
"", # empty implies default page size
"", # hashes
"", # base64-encoded hash
"true", # ascending
"", # empty implies default page size
)
check:
@ -200,39 +204,35 @@ procSuite "Waku v2 Rest API - Store":
var pages = newSeq[seq[WakuMessage]](2)
# Fields that compose a HistoryCursor object
var reqPubsubTopic = DefaultPubsubTopic
var reqSenderTime = Timestamp(0)
var reqStoreTime = Timestamp(0)
var reqDigest = waku_store.MessageDigest()
var reqHash = none(WakuMessageHash)
for i in 0 ..< 2:
let response = await client.getStoreMessagesV1(
let response = await client.getStoreMessagesV3(
encodeUrl(fullAddr),
encodeUrl(reqPubsubTopic),
"true", # include data
encodeUrl(DefaultPubsubTopic),
"", # content topics. Empty ignores the field.
"", # start time. Empty ignores the field.
"", # end time. Empty ignores the field.
encodeUrl($reqSenderTime), # sender time
encodeUrl($reqStoreTime), # store time
reqDigest.toRestStringMessageDigest(),
# base64-encoded digest. Empty ignores the field.
"7", # page size. Empty implies default page size.
"", # hashes
if reqHash.isSome():
reqHash.get().toRestStringWakuMessageHash()
else:
""
, # base64-encoded digest. Empty ignores the field.
"true", # ascending
"7", # page size. Empty implies default page size.
)
var wakuMessages = newSeq[WakuMessage](0)
for j in 0 ..< response.data.messages.len:
wakuMessages.add(response.data.messages[j].toWakuMessage())
wakuMessages.add(response.data.messages[j].message)
pages[i] = wakuMessages
# populate the cursor for next page
if response.data.cursor.isSome():
reqPubsubTopic = response.data.cursor.get().pubsubTopic
reqDigest = response.data.cursor.get().digest
reqSenderTime = response.data.cursor.get().senderTime
reqStoreTime = response.data.cursor.get().storeTime
if response.data.paginationCursor.isSome():
reqHash = some(response.data.paginationCursor.get())
check:
response.status == 200
@ -289,8 +289,8 @@ procSuite "Waku v2 Rest API - Store":
let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId
# Filtering by a known pubsub topic
var response = await client.getStoreMessagesV1(
encodeUrl($fullAddr), encodeUrl(DefaultPubsubTopic)
var response = await client.getStoreMessagesV3(
encodeUrl($fullAddr), "true", encodeUrl(DefaultPubsubTopic)
)
check:
@ -299,15 +299,15 @@ procSuite "Waku v2 Rest API - Store":
response.data.messages.len == 3
# Get all the messages by specifying an empty pubsub topic
response = await client.getStoreMessagesV1(encodeUrl($fullAddr))
response = await client.getStoreMessagesV3(encodeUrl($fullAddr), "true")
check:
response.status == 200
$response.contentType == $MIMETYPE_JSON
response.data.messages.len == 3
# Receiving no messages by filtering with a random pubsub topic
response = await client.getStoreMessagesV1(
encodeUrl($fullAddr), encodeUrl("random pubsub topic")
response = await client.getStoreMessagesV3(
encodeUrl($fullAddr), "true", encodeUrl("random pubsub topic")
)
check:
response.status == 200
@ -362,8 +362,8 @@ procSuite "Waku v2 Rest API - Store":
# Filtering by a known pubsub topic.
# We also pass the store-node address in the request.
var response = await client.getStoreMessagesV1(
encodeUrl(fullAddr), encodeUrl(DefaultPubsubTopic)
var response = await client.getStoreMessagesV3(
encodeUrl(fullAddr), "true", encodeUrl(DefaultPubsubTopic)
)
check:
response.status == 200
@ -372,7 +372,8 @@ procSuite "Waku v2 Rest API - Store":
# Get all the messages by specifying an empty pubsub topic
# We also pass the store-node address in the request.
response = await client.getStoreMessagesV1(encodeUrl(fullAddr), encodeUrl(""))
response =
await client.getStoreMessagesV3(encodeUrl(fullAddr), "true", encodeUrl(""))
check:
response.status == 200
$response.contentType == $MIMETYPE_JSON
@ -380,8 +381,8 @@ procSuite "Waku v2 Rest API - Store":
# Receiving no messages by filtering with a random pubsub topic
# We also pass the store-node address in the request.
response = await client.getStoreMessagesV1(
encodeUrl(fullAddr), encodeUrl("random pubsub topic")
response = await client.getStoreMessagesV3(
encodeUrl(fullAddr), "true", encodeUrl("random pubsub topic")
)
check:
response.status == 200
@ -389,14 +390,16 @@ procSuite "Waku v2 Rest API - Store":
response.data.messages.len == 0
# Receiving 400 response if setting wrong store-node address
response = await client.getStoreMessagesV1(
encodeUrl("incorrect multi address format"), encodeUrl("random pubsub topic")
response = await client.getStoreMessagesV3(
encodeUrl("incorrect multi address format"),
"true",
encodeUrl("random pubsub topic"),
)
check:
response.status == 400
$response.contentType == $MIMETYPE_TEXT
response.data.messages.len == 0
response.data.error_message.get ==
response.data.statusDesc ==
"Failed parsing remote peer info [MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]]"
await restServer.stop()
@ -446,8 +449,8 @@ procSuite "Waku v2 Rest API - Store":
let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId
# Filtering by content topic
let response = await client.getStoreMessagesV1(
encodeUrl(fullAddr), encodeUrl(DefaultPubsubTopic), encodeUrl("ct1,ct2")
let response = await client.getStoreMessagesV3(
encodeUrl(fullAddr), "true", encodeUrl(DefaultPubsubTopic), encodeUrl("ct1,ct2")
)
check:
response.status == 200
@ -471,19 +474,35 @@ procSuite "Waku v2 Rest API - Store":
installStoreApiHandlers(restServer.router, node)
restServer.start()
# WakuStore setup
let driver: ArchiveDriver = QueueDriver.new()
let mountArchiveRes = node.mountArchive(driver)
assert mountArchiveRes.isOk(), mountArchiveRes.error
await node.mountStore()
node.mountStoreClient()
let key = generateEcdsaKey()
var peerSwitch = newStandardSwitch(some(key))
await peerSwitch.start()
peerSwitch.mount(node.wakuStore)
let client = newRestHttpClient(initTAddress(restAddress, restPort))
let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo()
# Sending no peer-store node address
var response = await client.getStoreMessagesV3(
encodeUrl(""), "true", encodeUrl(DefaultPubsubTopic)
)
check:
response.status == 412
$response.contentType == $MIMETYPE_TEXT
response.data.messages.len == 0
response.data.statusDesc == NoPeerNoDiscError.errobj.message
# Now add the storenode from "config"
node.peerManager.addServicePeer(remotePeerInfo, WakuStoreCodec)
# WakuStore setup
let driver: ArchiveDriver = QueueDriver.new()
let mountArchiveRes = node.mountArchive(driver)
assert mountArchiveRes.isOk(), mountArchiveRes.error
await node.mountStore()
# Now prime it with some history before tests
let msgList =
@ -495,26 +514,10 @@ procSuite "Waku v2 Rest API - Store":
for msg in msgList:
require (waitFor driver.put(DefaultPubsubTopic, msg)).isOk()
let client = newRestHttpClient(initTAddress(restAddress, restPort))
let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo()
let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId
# Sending no peer-store node address
var response =
await client.getStoreMessagesV1(encodeUrl(""), encodeUrl(DefaultPubsubTopic))
check:
response.status == 412
$response.contentType == $MIMETYPE_TEXT
response.data.messages.len == 0
response.data.error_message.get == NoPeerNoDiscError.errobj.message
# Now add the storenode from "config"
node.peerManager.addServicePeer(remotePeerInfo, WakuStoreCodec)
# Sending no peer-store node address
response =
await client.getStoreMessagesV1(encodeUrl(""), encodeUrl(DefaultPubsubTopic))
response = await client.getStoreMessagesV3(
encodeUrl(""), "true", encodeUrl(DefaultPubsubTopic)
)
check:
response.status == 200
$response.contentType == $MIMETYPE_JSON
@ -561,8 +564,9 @@ procSuite "Waku v2 Rest API - Store":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
# Filtering by a known pubsub topic.
var response =
await client.getStoreMessagesV1(none[string](), encodeUrl(DefaultPubsubTopic))
var response = await client.getStoreMessagesV3(
includeData = "true", pubsubTopic = encodeUrl(DefaultPubsubTopic)
)
check:
response.status == 200
@ -570,15 +574,17 @@ procSuite "Waku v2 Rest API - Store":
response.data.messages.len == 3
# Get all the messages by specifying an empty pubsub topic
response = await client.getStoreMessagesV1(none[string](), encodeUrl(""))
response =
await client.getStoreMessagesV3(includeData = "true", pubsubTopic = encodeUrl(""))
check:
response.status == 200
$response.contentType == $MIMETYPE_JSON
response.data.messages.len == 3
# Receiving no messages by filtering with a random pubsub topic
response =
await client.getStoreMessagesV1(none[string](), encodeUrl("random pubsub topic"))
response = await client.getStoreMessagesV3(
includeData = "true", pubsubTopic = encodeUrl("random pubsub topic")
)
check:
response.status == 200
$response.contentType == $MIMETYPE_JSON
@ -615,31 +621,24 @@ procSuite "Waku v2 Rest API - Store":
# Filtering by a known pubsub topic.
var response =
await client.getStoreMessagesV1(none[string](), encodeUrl(DefaultPubsubTopic))
await client.getStoreMessagesV3(pubsubTopic = encodeUrl(DefaultPubsubTopic))
check:
response.status == 200
$response.contentType == $MIMETYPE_JSON
response.data.messages.len == 1
let storeMessage = response.data.messages[0]
let storeMessage = response.data.messages[0].message
check:
storeMessage.contentTopic.isSome()
storeMessage.version.isSome()
storeMessage.timestamp.isSome()
storeMessage.ephemeral.isSome()
storeMessage.meta.isSome()
storeMessage.payload == msg.payload
storeMessage.contentTopic == msg.contentTopic
storeMessage.version == msg.version
storeMessage.timestamp == msg.timestamp
storeMessage.ephemeral == msg.ephemeral
storeMessage.meta == msg.meta
check:
storeMessage.payload == base64.encode(msg.payload)
storeMessage.contentTopic.get() == msg.contentTopic
storeMessage.version.get() == msg.version
storeMessage.timestamp.get() == msg.timestamp
storeMessage.ephemeral.get() == msg.ephemeral
storeMessage.meta.get() == base64.encode(msg.meta)
asyncTest "Rate limit store node history query":
asyncTest "Rate limit store node store query":
# Test adapted from the analogous one in waku_store/test_wakunode_store.nim
let node = testWakuNode()
await node.start()
@ -690,39 +689,36 @@ procSuite "Waku v2 Rest API - Store":
var pages = newSeq[seq[WakuMessage]](2)
# Fields that compose a HistoryCursor object
var reqPubsubTopic = DefaultPubsubTopic
var reqSenderTime = Timestamp(0)
var reqStoreTime = Timestamp(0)
var reqDigest = waku_store.MessageDigest()
var reqHash = none(WakuMessageHash)
for i in 0 ..< 2:
let response = await client.getStoreMessagesV1(
let response = await client.getStoreMessagesV3(
encodeUrl(fullAddr),
"true", # include data
encodeUrl(reqPubsubTopic),
"", # content topics. Empty ignores the field.
"", # start time. Empty ignores the field.
"", # end time. Empty ignores the field.
encodeUrl($reqSenderTime), # sender time
encodeUrl($reqStoreTime), # store time
reqDigest.toRestStringMessageDigest(),
# base64-encoded digest. Empty ignores the field.
"3", # page size. Empty implies default page size.
"", # hashes
if reqHash.isSome():
reqHash.get().toRestStringWakuMessageHash()
else:
""
, # base64-encoded digest. Empty ignores the field.
"true", # ascending
"3", # page size. Empty implies default page size.
)
var wakuMessages = newSeq[WakuMessage](0)
for j in 0 ..< response.data.messages.len:
wakuMessages.add(response.data.messages[j].toWakuMessage())
wakuMessages.add(response.data.messages[j].message)
pages[i] = wakuMessages
# populate the cursor for next page
if response.data.cursor.isSome():
reqPubsubTopic = response.data.cursor.get().pubsubTopic
reqDigest = response.data.cursor.get().digest
reqSenderTime = response.data.cursor.get().senderTime
reqStoreTime = response.data.cursor.get().storeTime
if response.data.paginationCursor.isSome():
reqHash = response.data.paginationCursor
check:
response.status == 200
@ -733,38 +729,44 @@ procSuite "Waku v2 Rest API - Store":
pages[1] == msgList[3 .. 5]
# request last third will lead to rate limit rejection
var response = await client.getStoreMessagesV1(
var response = await client.getStoreMessagesV3(
encodeUrl(fullAddr),
"true", # include data
encodeUrl(reqPubsubTopic),
"", # content topics. Empty ignores the field.
"", # start time. Empty ignores the field.
"", # end time. Empty ignores the field.
encodeUrl($reqSenderTime), # sender time
encodeUrl($reqStoreTime), # store time
reqDigest.toRestStringMessageDigest(),
# base64-encoded digest. Empty ignores the field.
"", # hashes
if reqHash.isSome():
reqHash.get().toRestStringWakuMessageHash()
else:
""
, # base64-encoded digest. Empty ignores the field.
)
check:
response.status == 429
$response.contentType == $MIMETYPE_TEXT
response.data.error_message.get == "Request rate limmit reached"
response.data.statusDesc == "Request rate limit reached"
await sleepAsync(500.millis)
# retry after respective amount of time shall succeed
response = await client.getStoreMessagesV1(
response = await client.getStoreMessagesV3(
encodeUrl(fullAddr),
"true", # include data
encodeUrl(reqPubsubTopic),
"", # content topics. Empty ignores the field.
"", # start time. Empty ignores the field.
"", # end time. Empty ignores the field.
encodeUrl($reqSenderTime), # sender time
encodeUrl($reqStoreTime), # store time
reqDigest.toRestStringMessageDigest(),
# base64-encoded digest. Empty ignores the field.
"5", # page size. Empty implies default page size.
"", # hashes
if reqHash.isSome():
reqHash.get().toRestStringWakuMessageHash()
else:
""
, # base64-encoded digest. Empty ignores the field.
"true", # ascending
"5", # page size. Empty implies default page size.
)
check:
@ -773,7 +775,7 @@ procSuite "Waku v2 Rest API - Store":
var wakuMessages = newSeq[WakuMessage](0)
for j in 0 ..< response.data.messages.len:
wakuMessages.add(response.data.messages[j].toWakuMessage())
wakuMessages.add(response.data.messages[j].message)
check wakuMessages == msgList[6 .. 9]

View File

@ -32,6 +32,7 @@ import
../../waku/waku_api/rest/filter/handlers as rest_filter_api,
../../waku/waku_api/rest/lightpush/handlers as rest_lightpush_api,
../../waku/waku_api/rest/store/handlers as rest_store_api,
../../waku/waku_api/rest/legacy_store/handlers as rest_legacy_store_api,
../../waku/waku_api/rest/health/handlers as rest_health_api,
../../waku/waku_api/rest/admin/handlers as rest_admin_api,
../../waku/waku_archive,

View File

@ -19,6 +19,9 @@ import
../discovery/waku_dnsdisc,
../waku_archive,
../waku_store,
../waku_store/common as store_common,
../waku_store_legacy,
../waku_store_legacy/common as legacy_common,
../waku_filter_v2,
../waku_peer_exchange,
../node/peer_manager,
@ -248,14 +251,28 @@ proc setupProtocols(
except CatchableError:
return err("failed to mount waku store protocol: " & getCurrentExceptionMsg())
try:
await mountLegacyStore(node)
except CatchableError:
return
err("failed to mount waku legacy store protocol: " & getCurrentExceptionMsg())
mountStoreClient(node)
if conf.storenode != "":
let storeNode = parsePeerInfo(conf.storenode)
if storeNode.isOk():
node.peerManager.addServicePeer(storeNode.value, WakuStoreCodec)
node.peerManager.addServicePeer(storeNode.value, store_common.WakuStoreCodec)
else:
return err("failed to set node waku store peer: " & storeNode.error)
mountLegacyStoreClient(node)
if conf.storenode != "":
let storeNode = parsePeerInfo(conf.storenode)
if storeNode.isOk():
node.peerManager.addServicePeer(storeNode.value, legacy_common.WakuStoreCodec)
else:
return err("failed to set node waku legacy store peer: " & storeNode.error)
# NOTE Must be mounted after relay
if conf.lightpush:
try:

View File

@ -31,8 +31,12 @@ import
../waku_core/topics/sharding,
../waku_relay,
../waku_archive,
../waku_store,
../waku_store_legacy/protocol as legacy_store,
../waku_store_legacy/client as legacy_store_client,
../waku_store_legacy/common as legacy_store_common,
../waku_store/protocol as store,
../waku_store/client as store_client,
../waku_store/common as store_common,
../waku_filter_v2,
../waku_filter_v2/client as filter_client,
../waku_filter_v2/subscriptions as filter_subscriptions,
@ -87,8 +91,10 @@ type
switch*: Switch
wakuRelay*: WakuRelay
wakuArchive*: WakuArchive
wakuStore*: WakuStore
wakuStoreClient*: WakuStoreClient
wakuLegacyStore*: legacy_store.WakuStore
wakuLegacyStoreClient*: legacy_store_client.WakuStoreClient
wakuStore*: store.WakuStore
wakuStoreClient*: store_client.WakuStoreClient
wakuFilter*: waku_filter_v2.WakuFilter
wakuFilterClient*: filter_client.WakuFilterClient
wakuRlnRelay*: WakuRLNRelay
@ -651,10 +657,10 @@ proc mountArchive*(
return ok()
## Waku store
## Legacy Waku Store
# TODO: Review this mapping logic. Maybe move it to the application code
proc toArchiveQuery(request: HistoryQuery): ArchiveQuery =
proc toArchiveQuery(request: legacy_store_common.HistoryQuery): ArchiveQuery =
ArchiveQuery(
pubsubTopic: request.pubsubTopic,
contentTopics: request.contentTopics,
@ -674,7 +680,7 @@ proc toArchiveQuery(request: HistoryQuery): ArchiveQuery =
)
# TODO: Review this mapping logic. Maybe move it to the application code
proc toHistoryResult*(res: ArchiveResult): HistoryResult =
proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult =
if res.isErr():
let error = res.error
case res.error.kind
@ -699,51 +705,57 @@ proc toHistoryResult*(res: ArchiveResult): HistoryResult =
)
)
proc mountStore*(
proc mountLegacyStore*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async, raises: [Defect, LPError].} =
info "mounting waku store protocol"
) {.async.} =
info "mounting waku legacy store protocol"
if node.wakuArchive.isNil():
error "failed to mount waku store protocol", error = "waku archive not set"
error "failed to mount waku legacy store protocol", error = "waku archive not set"
return
# TODO: Review this handler logic. Maybe move it to the application code
let queryHandler: HistoryQueryHandler = proc(
request: HistoryQuery
): Future[HistoryResult] {.async.} =
): Future[legacy_store_common.HistoryResult] {.async.} =
if request.cursor.isSome():
request.cursor.get().checkHistCursor().isOkOr:
return err(error)
let request = request.toArchiveQuery()
let response = await node.wakuArchive.findMessages(request)
let response = await node.wakuArchive.findMessagesV2(request)
return response.toHistoryResult()
node.wakuStore =
WakuStore.new(node.peerManager, node.rng, queryHandler, some(rateLimit))
node.wakuLegacyStore = legacy_store.WakuStore.new(
node.peerManager, node.rng, queryHandler, some(rateLimit)
)
if node.started:
# Node has started already. Let's start store too.
await node.wakuStore.start()
await node.wakuLegacyStore.start()
node.switch.mount(node.wakuStore, protocolMatcher(WakuStoreCodec))
node.switch.mount(
node.wakuLegacyStore, protocolMatcher(legacy_store_common.WakuStoreCodec)
)
proc mountStoreClient*(node: WakuNode) =
info "mounting store client"
proc mountLegacyStoreClient*(node: WakuNode) =
info "mounting legacy store client"
node.wakuStoreClient = WakuStoreClient.new(node.peerManager, node.rng)
node.wakuLegacyStoreClient =
legacy_store_client.WakuStoreClient.new(node.peerManager, node.rng)
proc query*(
node: WakuNode, query: HistoryQuery, peer: RemotePeerInfo
): Future[WakuStoreResult[HistoryResponse]] {.async, gcsafe.} =
node: WakuNode, query: legacy_store_common.HistoryQuery, peer: RemotePeerInfo
): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {.
async, gcsafe
.} =
## Queries known nodes for historical messages
if node.wakuStoreClient.isNil():
return err("waku store client is nil")
if node.wakuLegacyStoreClient.isNil():
return err("waku legacy store client is nil")
let queryRes = await node.wakuStoreClient.query(query, peer)
let queryRes = await node.wakuLegacyStoreClient.query(query, peer)
if queryRes.isErr():
return err($queryRes.error)
return err("legacy store client query error: " & $queryRes.error)
let response = queryRes.get()
@ -751,15 +763,15 @@ proc query*(
# TODO: Move to application module (e.g., wakunode2.nim)
proc query*(
node: WakuNode, query: HistoryQuery
): Future[WakuStoreResult[HistoryResponse]] {.
node: WakuNode, query: legacy_store_common.HistoryQuery
): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {.
async, gcsafe, deprecated: "Use 'node.query()' with peer destination instead"
.} =
## Queries known nodes for historical messages
if node.wakuStoreClient.isNil():
return err("waku store client is nil")
if node.wakuLegacyStoreClient.isNil():
return err("waku legacy store client is nil")
let peerOpt = node.peerManager.selectPeer(WakuStoreCodec)
let peerOpt = node.peerManager.selectPeer(legacy_store_common.WakuStoreCodec)
if peerOpt.isNone():
error "no suitable remote peers"
return err("peer_not_found_failure")
@ -779,10 +791,10 @@ when defined(waku_exp_store_resume):
## peerList indicates the list of peers to query from. The history is fetched from the first available peer in this list. Such candidates should be found through a discovery method (to be developed).
## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from.
## The history gets fetched successfully if the dialed peer has been online during the queried time window.
if node.wakuStoreClient.isNil():
if node.wakuLegacyStoreClient.isNil():
return
let retrievedMessages = await node.wakuStoreClient.resume(peerList)
let retrievedMessages = await node.wakuLegacyStoreClient.resume(peerList)
if retrievedMessages.isErr():
error "failed to resume store", error = retrievedMessages.error
return
@ -790,6 +802,93 @@ when defined(waku_exp_store_resume):
info "the number of retrieved messages since the last online time: ",
number = retrievedMessages.value
## Waku Store
proc toArchiveQuery(request: StoreQueryRequest): ArchiveQuery =
var query = ArchiveQuery()
query.pubsubTopic = request.pubsubTopic
query.contentTopics = request.contentTopics
query.startTime = request.startTime
query.endTime = request.endTime
query.hashes = request.messageHashes
if request.paginationCursor.isSome():
var cursor = ArchiveCursor()
cursor.hash = request.paginationCursor.get()
query.cursor = some(cursor)
query.direction = request.paginationForward
if request.paginationLimit.isSome():
query.pageSize = uint(request.paginationLimit.get())
return query
proc toStoreResult(res: ArchiveResult): StoreQueryResult =
let response = res.valueOr:
return err(StoreError.new(300, "archive error: " & $error))
var res = StoreQueryResponse()
res.statusCode = 200
res.messages = response.hashes.zip(response.messages).mapIt(
WakuMessageKeyValue(messageHash: it[0], message: it[1])
)
if response.cursor.isSome():
res.paginationCursor = some(response.cursor.get().hash)
return ok(res)
proc mountStore*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
if node.wakuArchive.isNil():
error "failed to mount waku store protocol", error = "waku archive not set"
return
info "mounting waku store protocol"
let requestHandler: StoreQueryRequestHandler = proc(
request: StoreQueryRequest
): Future[StoreQueryResult] {.async.} =
let request = request.toArchiveQuery()
let response = await node.wakuArchive.findMessages(request)
return response.toStoreResult()
node.wakuStore =
store.WakuStore.new(node.peerManager, node.rng, requestHandler, some(rateLimit))
if node.started:
await node.wakuStore.start()
node.switch.mount(node.wakuStore, protocolMatcher(store_common.WakuStoreCodec))
proc mountStoreClient*(node: WakuNode) =
info "mounting store client"
node.wakuStoreClient = store_client.WakuStoreClient.new(node.peerManager, node.rng)
proc query*(
node: WakuNode, request: store_common.StoreQueryRequest, peer: RemotePeerInfo
): Future[store_common.WakuStoreResult[store_common.StoreQueryResponse]] {.
async, gcsafe
.} =
## Queries known nodes for historical messages
if node.wakuStoreClient.isNil():
return err("waku store v3 client is nil")
let response = (await node.wakuStoreClient.query(request, peer)).valueOr:
var res = StoreQueryResponse()
res.statusCode = uint32(error.kind)
res.statusDesc = $error
return ok(res)
return ok(response)
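For orientation, a minimal usage sketch of the new store v3 client path (illustrative only, not part of this commit; `node` and `peer` are placeholders, field names as introduced in this change):
proc exampleStoreV3Query(node: WakuNode, peer: RemotePeerInfo) {.async.} =
  # Hypothetical example; assumes the surrounding waku_node module imports.
  var req = store_common.StoreQueryRequest(
    includeData: true, # return full messages, not only hashes
    pubsubTopic: some(DefaultPubsubTopic),
    paginationLimit: some(uint64(10)), # page size
  )
  let resp = (await node.query(req, peer)).valueOr:
    echo "store v3 query failed: ", error
    return
  for kv in resp.messages:
    echo "hash: ", kv.messageHash # kv.message holds the payload when includeData is set
  # resp.paginationCursor, when present, seeds req.paginationCursor for the next page.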
## Waku lightpush
proc mountLightPush*(

View File

@ -4,7 +4,7 @@ else:
{.push raises: [].}
import
std/[strformat, sequtils, sets, tables],
std/[strformat, sequtils, tables],
stew/byteutils,
chronicles,
json_serialization,
@ -13,7 +13,7 @@ import
import
../../../waku_core,
../../../waku_store,
../../../waku_store_legacy/common,
../../../waku_filter_v2,
../../../waku_lightpush/common,
../../../waku_relay,
@ -66,7 +66,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
)
tuplesToWakuPeers(peers, filterV2Peers)
if not node.wakuStore.isNil():
if not node.wakuLegacyStore.isNil():
# Map WakuStore peers to WakuPeers and add to return list
let storePeers = node.peerManager.peerStore.peers(WakuStoreCodec).mapIt(
(

View File

@ -18,6 +18,7 @@ import
../../waku/waku_api/rest/filter/handlers as rest_filter_api,
../../waku/waku_api/rest/lightpush/handlers as rest_lightpush_api,
../../waku/waku_api/rest/store/handlers as rest_store_api,
../../waku/waku_api/rest/legacy_store/handlers as rest_store_legacy_api,
../../waku/waku_api/rest/health/handlers as rest_health_api,
../../waku/waku_api/rest/admin/handlers as rest_admin_api,
../../waku/waku_core/topics
@ -172,7 +173,8 @@ proc startRestServerProtocolSupport*(
else:
none(DiscoveryHandler)
installStoreApiHandlers(router, node, storeDiscoHandler)
rest_store_api.installStoreApiHandlers(router, node, storeDiscoHandler)
rest_store_legacy_api.installStoreApiHandlers(router, node, storeDiscoHandler)
## Light push API
if conf.lightpushnode != "" and node.wakuLightpushClient != nil:

View File

@ -0,0 +1,78 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
chronicles, json_serialization, json_serialization/std/options, presto/[route, client]
import ../../../waku_store_legacy/common, ../serdes, ../responses, ./types
export types
logScope:
topics = "waku node rest legacy store_api"
proc decodeBytes*(
t: typedesc[StoreResponseRest],
data: openArray[byte],
contentType: Opt[ContentTypeData],
): RestResult[StoreResponseRest] =
if MediaType.init($contentType) == MIMETYPE_JSON:
let decoded = ?decodeFromJsonBytes(StoreResponseRest, data)
return ok(decoded)
if MediaType.init($contentType) == MIMETYPE_TEXT:
var res: string
if len(data) > 0:
res = newString(len(data))
copyMem(addr res[0], unsafeAddr data[0], len(data))
return ok(
StoreResponseRest(
messages: newSeq[StoreWakuMessage](0),
cursor: none(HistoryCursorRest),
# field that contains error information
errorMessage: some(res),
)
)
# If everything goes wrong
return err(cstring("Unsupported contentType " & $contentType))
proc getStoreMessagesV1*(
# URL-encoded reference to the store-node
peerAddr: string = "",
pubsubTopic: string = "",
# URL-encoded comma-separated list of content topics
contentTopics: string = "",
startTime: string = "",
endTime: string = "",
# Optional cursor fields
senderTime: string = "",
storeTime: string = "",
digest: string = "", # base64-encoded digest
pageSize: string = "",
ascending: string = "",
): RestResponse[StoreResponseRest] {.
rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet
.}
proc getStoreMessagesV1*(
# URL-encoded reference to the store-node
peerAddr: Option[string],
pubsubTopic: string = "",
# URL-encoded comma-separated list of content topics
contentTopics: string = "",
startTime: string = "",
endTime: string = "",
# Optional cursor fields
senderTime: string = "",
storeTime: string = "",
digest: string = "", # base64-encoded digest
pageSize: string = "",
ascending: string = "",
): RestResponse[StoreResponseRest] {.
rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet
.}

View File

@ -0,0 +1,258 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/strformat, stew/results, chronicles, uri, json_serialization, presto/route
import
../../../waku_core,
../../../waku_store_legacy/common,
../../../waku_store_legacy/self_req_handler,
../../../waku_node,
../../../node/peer_manager,
../../../common/paging,
../../handlers,
../responses,
../serdes,
./types
export types
logScope:
topics = "waku node rest legacy store_api"
const futTimeout* = 5.seconds # Max time to wait for futures
const NoPeerNoDiscError* =
RestApiResponse.preconditionFailed("No suitable service peer & no discovery method")
# Queries the store-node with the query parameters and
# returns a RestApiResponse that is sent back to the api client.
proc performHistoryQuery(
selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo
): Future[RestApiResponse] {.async.} =
let queryFut = selfNode.query(histQuery, storePeer)
if not await queryFut.withTimeout(futTimeout):
const msg = "No history response received (timeout)"
error msg
return RestApiResponse.internalServerError(msg)
let res = queryFut.read()
if res.isErr():
const msg = "Error occurred in queryFut.read()"
error msg, error = res.error
return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]"))
let storeResp = res.value.toStoreResponseRest()
let resp = RestApiResponse.jsonResponse(storeResp, status = Http200)
if resp.isErr():
const msg = "Error building the json respose"
error msg, error = resp.error
return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]"))
return resp.get()
# Converts a string time representation into an Option[Timestamp].
# Only positive time is considered a valid Timestamp in the request
proc parseTime(input: Option[string]): Result[Option[Timestamp], string] =
if input.isSome() and input.get() != "":
try:
let time = parseInt(input.get())
if time > 0:
return ok(some(Timestamp(time)))
except ValueError:
return err("Problem parsing time [" & getCurrentExceptionMsg() & "]")
return ok(none(Timestamp))
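To make the rule above concrete, a few illustrative cases (not part of this commit):
# parseTime(some("1680000000")) -> ok(some(Timestamp(1680000000)))
# parseTime(some("0")) -> ok(none(Timestamp)) ; zero and negatives yield no timestamp
# parseTime(some("abc")) -> err(...) ; not an integer
# parseTime(none(string)) -> ok(none(Timestamp))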
# Generates a history query cursor as per the given params
proc parseCursor(
parsedPubsubTopic: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
): Result[Option[HistoryCursor], string] =
# Parse sender time
let parsedSenderTime = parseTime(senderTime)
if not parsedSenderTime.isOk():
return err(parsedSenderTime.error)
# Parse store time
let parsedStoreTime = parseTime(storeTime)
if not parsedStoreTime.isOk():
return err(parsedStoreTime.error)
# Parse message digest
let parsedMsgDigest = parseMsgDigest(digest)
if not parsedMsgDigest.isOk():
return err(parsedMsgDigest.error)
# Parse cursor information
if parsedPubsubTopic.isSome() and parsedSenderTime.value.isSome() and
parsedStoreTime.value.isSome() and parsedMsgDigest.value.isSome():
return ok(
some(
HistoryCursor(
pubsubTopic: parsedPubsubTopic.get(),
senderTime: parsedSenderTime.value.get(),
storeTime: parsedStoreTime.value.get(),
digest: parsedMsgDigest.value.get(),
)
)
)
else:
return ok(none(HistoryCursor))
# Creates a HistoryQuery from the given params
proc createHistoryQuery(
pubsubTopic: Option[string],
contentTopics: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
startTime: Option[string],
endTime: Option[string],
pageSize: Option[string],
direction: Option[string],
): Result[HistoryQuery, string] =
# Parse pubsubTopic parameter
var parsedPubsubTopic = none(string)
if pubsubTopic.isSome():
let decodedPubsubTopic = decodeUrl(pubsubTopic.get())
if decodedPubsubTopic != "":
parsedPubsubTopic = some(decodedPubsubTopic)
# Parse the content topics
var parsedContentTopics = newSeq[ContentTopic](0)
if contentTopics.isSome():
let ctList = decodeUrl(contentTopics.get())
if ctList != "":
for ct in ctList.split(','):
parsedContentTopics.add(ct)
# Parse cursor information
let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest)
# Parse page size field
var parsedPagedSize = DefaultPageSize
if pageSize.isSome() and pageSize.get() != "":
try:
parsedPagedSize = uint64(parseInt(pageSize.get()))
except CatchableError:
return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]")
# Parse start time
let parsedStartTime = ?parseTime(startTime)
# Parse end time
let parsedEndTime = ?parseTime(endTime)
# Parse ascending field
var parsedDirection = default()
if direction.isSome() and direction.get() != "":
parsedDirection = direction.get().into()
return ok(
HistoryQuery(
pubsubTopic: parsedPubsubTopic,
contentTopics: parsedContentTopics,
startTime: parsedStartTime,
endTime: parsedEndTime,
direction: parsedDirection,
pageSize: parsedPagedSize,
cursor: parsedCursor,
)
)
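For orientation, a hypothetical legacy paged request (placeholder values; parameter names as handled below):
# /store/v1/messages?pubsubTopic=<topic>&senderTime=<ns>&storeTime=<ns>&digest=<base64>&pageSize=10&ascending=true
Note that parseCursor only yields a HistoryCursor when pubsubTopic, senderTime, storeTime and digest are all present; otherwise no cursor is set and paging starts from the first page.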
# Simple type conversion. The "Option[Result[string, cstring]]"
# type is used by the nim-presto library.
proc toOpt(self: Option[Result[string, cstring]]): Option[string] =
if not self.isSome() or self.get().value == "":
return none(string)
if self.isSome() and self.get().value != "":
return some(self.get().value)
proc retrieveMsgsFromSelfNode(
self: WakuNode, histQuery: HistoryQuery
): Future[RestApiResponse] {.async.} =
## Performs a "store" request to the local node (self node.)
## Notice that this doesn't follow the regular store libp2p channel because a node
## it is not allowed to libp2p-dial a node to itself, by default.
##
let selfResp = (await self.wakuLegacyStore.handleSelfStoreRequest(histQuery)).valueOr:
return RestApiResponse.internalServerError($error)
let storeResp = selfResp.toStoreResponseRest()
let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr:
const msg = "Error building the json respose"
error msg, error = error
return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))
return resp
# Subscribes the rest handler to attend "/store/v1/messages" requests
proc installStoreApiHandlers*(
router: var RestRouter,
node: WakuNode,
discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler),
) =
# Handles the store-query request according to the passed parameters
router.api(MethodGet, "/store/v1/messages") do(
peerAddr: Option[string],
pubsubTopic: Option[string],
contentTopics: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
startTime: Option[string],
endTime: Option[string],
pageSize: Option[string],
ascending: Option[string]
) -> RestApiResponse:
debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr
# All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
# Example:
# /store/v1/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic
# Parse the rest of the parameters and create a HistoryQuery
let histQuery = createHistoryQuery(
pubsubTopic.toOpt(),
contentTopics.toOpt(),
senderTime.toOpt(),
storeTime.toOpt(),
digest.toOpt(),
startTime.toOpt(),
endTime.toOpt(),
pageSize.toOpt(),
ascending.toOpt(),
)
if not histQuery.isOk():
return RestApiResponse.badRequest(histQuery.error)
if peerAddr.isNone() and not node.wakuLegacyStore.isNil():
## The user didn't specify a peer address and self-node is configured as a store node.
## In this case we assume that the user is willing to retrieve the messages stored by
## the local/self store node.
return await node.retrieveMsgsFromSelfNode(histQuery.get())
# Parse the peer address parameter
let parsedPeerAddr = parseUrlPeerAddr(peerAddr.toOpt()).valueOr:
return RestApiResponse.badRequest(error)
let peerAddr = parsedPeerAddr.valueOr:
node.peerManager.selectPeer(WakuStoreCodec).valueOr:
let handler = discHandler.valueOr:
return NoPeerNoDiscError
let peerOp = (await handler()).valueOr:
return RestApiResponse.internalServerError($error)
peerOp.valueOr:
return RestApiResponse.preconditionFailed(
"No suitable service peer & none discovered"
)
return await node.performHistoryQuery(histQuery.value, peerAddr)

View File

@ -0,0 +1,383 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[sets, strformat, uri],
stew/byteutils,
chronicles,
json_serialization,
json_serialization/std/options,
presto/[route, client, common]
import
../../../waku_store_legacy/common as waku_store_common,
../../../common/base64,
../../../waku_core,
../serdes
#### Types
type
HistoryCursorRest* = object
pubsubTopic*: PubsubTopic
senderTime*: Timestamp
storeTime*: Timestamp
digest*: waku_store_common.MessageDigest
StoreRequestRest* = object
# inspired by https://github.com/waku-org/nwaku/blob/f95147f5b7edfd45f914586f2d41cd18fb0e0d18/waku/v2//waku_store/common.nim#L52
pubsubTopic*: Option[PubsubTopic]
contentTopics*: seq[ContentTopic]
cursor*: Option[HistoryCursorRest]
startTime*: Option[Timestamp]
endTime*: Option[Timestamp]
pageSize*: uint64
ascending*: bool
StoreWakuMessage* = object
payload*: Base64String
contentTopic*: Option[ContentTopic]
version*: Option[uint32]
timestamp*: Option[Timestamp]
ephemeral*: Option[bool]
meta*: Option[Base64String]
StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse
messages*: seq[StoreWakuMessage]
cursor*: Option[HistoryCursorRest]
# field that contains error information
errorMessage*: Option[string]
createJsonFlavor RestJson
Json.setWriter JsonWriter, PreferredOutput = string
#### Type conversion
# Converts a URL-encoded-base64 string into a 'MessageDigest'
proc parseMsgDigest*(
input: Option[string]
): Result[Option[waku_store_common.MessageDigest], string] =
if not input.isSome() or input.get() == "":
return ok(none(waku_store_common.MessageDigest))
let decodedUrl = decodeUrl(input.get())
let base64Decoded = base64.decode(Base64String(decodedUrl))
var messageDigest = waku_store_common.MessageDigest()
if not base64Decoded.isOk():
return err(base64Decoded.error)
let base64DecodedArr = base64Decoded.get()
# Next snippet inspired by "nwaku/waku/waku_archive/archive.nim"
# TODO: Improve coherence of MessageDigest type
messageDigest = block:
var data: array[32, byte]
for i in 0 ..< min(base64DecodedArr.len, 32):
data[i] = base64DecodedArr[i]
waku_store_common.MessageDigest(data: data)
return ok(some(messageDigest))
# Converts a given MessageDigest object into a Base64-URL-encoded
# string suitable to be transmitted in a REST request-response.
# The MessageDigest is first base64 encoded and that result is
# URL-encoded.
proc toRestStringMessageDigest*(self: waku_store_common.MessageDigest): string =
let base64Encoded = base64.encode(self.data)
encodeUrl($base64Encoded)
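A quick round-trip sketch for the helpers above (hypothetical digest value, not part of this commit):
# let restStr = someDigest.toRestStringMessageDigest()
# parseMsgDigest(some(restStr)) recovers someDigest, since decodeUrl and
# base64.decode invert the encoding performed here.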
proc toWakuMessage*(message: StoreWakuMessage): WakuMessage =
WakuMessage(
payload: base64.decode(message.payload).get(),
contentTopic: message.contentTopic.get(),
version: message.version.get(),
timestamp: message.timestamp.get(),
ephemeral: message.ephemeral.get(),
meta: message.meta.get(Base64String("")).decode().get(),
)
# Converts a 'HistoryResponse' object to an 'StoreResponseRest'
# that can be serialized to a json object.
proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest =
proc toStoreWakuMessage(message: WakuMessage): StoreWakuMessage =
StoreWakuMessage(
payload: base64.encode(message.payload),
contentTopic: some(message.contentTopic),
version: some(message.version),
timestamp: some(message.timestamp),
ephemeral: some(message.ephemeral),
meta:
if message.meta.len > 0:
some(base64.encode(message.meta))
else:
none(Base64String)
,
)
var storeWakuMsgs: seq[StoreWakuMessage]
for m in histResp.messages:
storeWakuMsgs.add(m.toStoreWakuMessage())
var cursor = none(HistoryCursorRest)
if histResp.cursor.isSome:
cursor = some(
HistoryCursorRest(
pubsubTopic: histResp.cursor.get().pubsubTopic,
senderTime: histResp.cursor.get().senderTime,
storeTime: histResp.cursor.get().storeTime,
digest: histResp.cursor.get().digest,
)
)
StoreResponseRest(messages: storeWakuMsgs, cursor: cursor)
## Beginning of StoreWakuMessage serde
proc writeValue*(
writer: var JsonWriter, value: StoreWakuMessage
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("payload", $value.payload)
if value.contentTopic.isSome():
writer.writeField("content_topic", value.contentTopic.get())
if value.version.isSome():
writer.writeField("version", value.version.get())
if value.timestamp.isSome():
writer.writeField("timestamp", value.timestamp.get())
if value.ephemeral.isSome():
writer.writeField("ephemeral", value.ephemeral.get())
if value.meta.isSome():
writer.writeField("meta", value.meta.get())
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var StoreWakuMessage
) {.gcsafe, raises: [SerializationError, IOError].} =
var
payload = none(Base64String)
contentTopic = none(ContentTopic)
version = none(uint32)
timestamp = none(Timestamp)
ephemeral = none(bool)
meta = none(Base64String)
var keys = initHashSet[string]()
for fieldName in readObjectFields(reader):
# Check for repeated keys
if keys.containsOrIncl(fieldName):
let err =
try:
fmt"Multiple `{fieldName}` fields found"
except CatchableError:
"Multiple fields with the same name found"
reader.raiseUnexpectedField(err, "StoreWakuMessage")
case fieldName
of "payload":
payload = some(reader.readValue(Base64String))
of "content_topic":
contentTopic = some(reader.readValue(ContentTopic))
of "version":
version = some(reader.readValue(uint32))
of "timestamp":
timestamp = some(reader.readValue(Timestamp))
of "ephemeral":
ephemeral = some(reader.readValue(bool))
of "meta":
meta = some(reader.readValue(Base64String))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if payload.isNone():
reader.raiseUnexpectedValue("Field `payload` is missing")
value = StoreWakuMessage(
payload: payload.get(),
contentTopic: contentTopic,
version: version,
timestamp: timestamp,
ephemeral: ephemeral,
meta: meta,
)
## End of StoreWakuMessage serde
## Beginning of MessageDigest serde
proc writeValue*(
writer: var JsonWriter, value: waku_store_common.MessageDigest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("data", base64.encode(value.data))
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var waku_store_common.MessageDigest
) {.gcsafe, raises: [SerializationError, IOError].} =
var data = none(seq[byte])
for fieldName in readObjectFields(reader):
case fieldName
of "data":
if data.isSome():
reader.raiseUnexpectedField("Multiple `data` fields found", "MessageDigest")
let decoded = base64.decode(reader.readValue(Base64String))
if not decoded.isOk():
reader.raiseUnexpectedField("Failed decoding data", "MessageDigest")
data = some(decoded.get())
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if data.isNone():
reader.raiseUnexpectedValue("Field `data` is missing")
for i in 0 ..< 32:
value.data[i] = data.get()[i]
## End of MessageDigest serde
## Beginning of HistoryCursorRest serde
proc writeValue*(
writer: var JsonWriter, value: HistoryCursorRest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("pubsub_topic", value.pubsubTopic)
writer.writeField("sender_time", value.senderTime)
writer.writeField("store_time", value.storeTime)
writer.writeField("digest", value.digest)
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var HistoryCursorRest
) {.gcsafe, raises: [SerializationError, IOError].} =
var
pubsubTopic = none(PubsubTopic)
senderTime = none(Timestamp)
storeTime = none(Timestamp)
digest = none(waku_store_common.MessageDigest)
for fieldName in readObjectFields(reader):
case fieldName
of "pubsub_topic":
if pubsubTopic.isSome():
reader.raiseUnexpectedField(
"Multiple `pubsub_topic` fields found", "HistoryCursorRest"
)
pubsubTopic = some(reader.readValue(PubsubTopic))
of "sender_time":
if senderTime.isSome():
reader.raiseUnexpectedField(
"Multiple `sender_time` fields found", "HistoryCursorRest"
)
senderTime = some(reader.readValue(Timestamp))
of "store_time":
if storeTime.isSome():
reader.raiseUnexpectedField(
"Multiple `store_time` fields found", "HistoryCursorRest"
)
storeTime = some(reader.readValue(Timestamp))
of "digest":
if digest.isSome():
reader.raiseUnexpectedField(
"Multiple `digest` fields found", "HistoryCursorRest"
)
digest = some(reader.readValue(waku_store_common.MessageDigest))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if pubsubTopic.isNone():
reader.raiseUnexpectedValue("Field `pubsub_topic` is missing")
if senderTime.isNone():
reader.raiseUnexpectedValue("Field `sender_time` is missing")
if storeTime.isNone():
reader.raiseUnexpectedValue("Field `store_time` is missing")
if digest.isNone():
reader.raiseUnexpectedValue("Field `digest` is missing")
value = HistoryCursorRest(
pubsubTopic: pubsubTopic.get(),
senderTime: senderTime.get(),
storeTime: storeTime.get(),
digest: digest.get(),
)
## End of HistoryCursorRest serde
## Beginning of StoreResponseRest serde
proc writeValue*(
writer: var JsonWriter, value: StoreResponseRest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("messages", value.messages)
if value.cursor.isSome():
writer.writeField("cursor", value.cursor.get())
if value.errorMessage.isSome():
writer.writeField("error_message", value.errorMessage.get())
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var StoreResponseRest
) {.gcsafe, raises: [SerializationError, IOError].} =
var
messages = none(seq[StoreWakuMessage])
cursor = none(HistoryCursorRest)
errorMessage = none(string)
for fieldName in readObjectFields(reader):
case fieldName
of "messages":
if messages.isSome():
reader.raiseUnexpectedField(
"Multiple `messages` fields found", "StoreResponseRest"
)
messages = some(reader.readValue(seq[StoreWakuMessage]))
of "cursor":
if cursor.isSome():
reader.raiseUnexpectedField(
"Multiple `cursor` fields found", "StoreResponseRest"
)
cursor = some(reader.readValue(HistoryCursorRest))
of "error_message":
if errorMessage.isSome():
reader.raiseUnexpectedField(
"Multiple `error_message` fields found", "StoreResponseRest"
)
errorMessage = some(reader.readValue(string))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if messages.isNone():
reader.raiseUnexpectedValue("Field `messages` is missing")
value = StoreResponseRest(
messages: messages.get(), cursor: cursor, errorMessage: errorMessage
)
## End of StoreResponseRest serde
## Beginning of StoreRequestRest serde
proc writeValue*(
writer: var JsonWriter, value: StoreRequestRest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
if value.pubsubTopic.isSome():
writer.writeField("pubsub_topic", value.pubsubTopic.get())
writer.writeField("content_topics", value.contentTopics)
if value.startTime.isSome():
writer.writeField("start_time", value.startTime.get())
if value.endTime.isSome():
writer.writeField("end_time", value.endTime.get())
writer.writeField("page_size", value.pageSize)
writer.writeField("ascending", value.ascending)
writer.endRecord()
## End of StoreRequestRest serde

View File

@ -6,7 +6,11 @@ else:
import
chronicles, json_serialization, json_serialization/std/options, presto/[route, client]
import
../../../waku_store/common, ../../../common/base64, ../serdes, ../responses, ./types
../../../waku_store/common,
../../../waku_core/message/digest,
../serdes,
../responses,
./types
export types
@ -14,12 +18,12 @@ logScope:
topics = "waku node rest store_api"
proc decodeBytes*(
t: typedesc[StoreResponseRest],
t: typedesc[StoreQueryResponse],
data: openArray[byte],
contentType: Opt[ContentTypeData],
): RestResult[StoreResponseRest] =
): RestResult[StoreQueryResponse] =
if MediaType.init($contentType) == MIMETYPE_JSON:
let decoded = ?decodeFromJsonBytes(StoreResponseRest, data)
let decoded = ?decodeFromJsonBytes(StoreQueryResponse, data)
return ok(decoded)
if MediaType.init($contentType) == MIMETYPE_TEXT:
@ -29,51 +33,34 @@ proc decodeBytes*(
copyMem(addr res[0], unsafeAddr data[0], len(data))
return ok(
StoreResponseRest(
messages: newSeq[StoreWakuMessage](0),
cursor: none(HistoryCursorRest),
# field that contain error information
errorMessage: some(res),
StoreQueryResponse(
statusCode: uint32(ErrorCode.BAD_RESPONSE),
statusDesc: res,
messages: newSeq[WakuMessageKeyValue](0),
paginationCursor: none(WakuMessageHash),
)
)
# If everything goes wrong
return err(cstring("Unsupported contentType " & $contentType))
proc getStoreMessagesV1*(
proc getStoreMessagesV3*(
# URL-encoded reference to the store-node
peerAddr: string = "",
includeData: string = "",
pubsubTopic: string = "",
# URL-encoded comma-separated list of content topics
contentTopics: string = "",
startTime: string = "",
endTime: string = "",
# Optional cursor fields
senderTime: string = "",
storeTime: string = "",
digest: string = "", # base64-encoded digest
pageSize: string = "",
ascending: string = "",
): RestResponse[StoreResponseRest] {.
rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet
.}
proc getStoreMessagesV1*(
# URL-encoded reference to the store-node
peerAddr: Option[string],
pubsubTopic: string = "",
# URL-encoded comma-separated list of content topics
contentTopics: string = "",
startTime: string = "",
endTime: string = "",
# URL-encoded comma-separated list of message hashes
hashes: string = "",
# Optional cursor fields
senderTime: string = "",
storeTime: string = "",
digest: string = "", # base64-encoded digest
pageSize: string = "",
cursor: string = "", # base64-encoded hash
ascending: string = "",
): RestResponse[StoreResponseRest] {.
rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet
pageSize: string = "",
): RestResponse[StoreQueryResponse] {.
rest, endpoint: "/store/v3/messages", meth: HttpMethod.MethodGet
.}

View File

@ -28,35 +28,35 @@ const NoPeerNoDiscError* =
# Queries the store-node with the query parameters and
# returns a RestApiResponse that is sent back to the api client.
proc performHistoryQuery(
selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo
proc performStoreQuery(
selfNode: WakuNode, storeQuery: StoreQueryRequest, storePeer: RemotePeerInfo
): Future[RestApiResponse] {.async.} =
let queryFut = selfNode.query(histQuery, storePeer)
let queryFut = selfNode.query(storeQuery, storePeer)
if not await queryFut.withTimeout(futTimeout):
const msg = "No history response received (timeout)"
error msg
return RestApiResponse.internalServerError(msg)
let res = queryFut.read()
if res.isErr():
const TooManyRequestErrorStr =
$HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS)
if res.error == TooManyRequestErrorStr:
debug "Request rate limmit reached on peer ", storePeer
return RestApiResponse.tooManyRequests("Request rate limmit reached")
else:
const msg = "Error occurred in queryFut.read()"
error msg, error = res.error
return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]"))
let futRes = queryFut.read()
let storeResp = res.value.toStoreResponseRest()
let resp = RestApiResponse.jsonResponse(storeResp, status = Http200)
if resp.isErr():
if futRes.isErr():
const msg = "Error occurred in queryFut.read()"
error msg, error = futRes.error
return RestApiResponse.internalServerError(fmt("{msg} [{futRes.error}]"))
let res = futRes.get()
if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS):
debug "Request rate limit reached on peer ", storePeer
return RestApiResponse.tooManyRequests("Request rate limit reached")
let resp = RestApiResponse.jsonResponse(res, status = Http200).valueOr:
const msg = "Error building the json respose"
error msg, error = resp.error
return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]"))
error msg, error = error
return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))
return resp.get()
return resp
# Converts a string time representation into an Option[Timestamp].
# Only positive time is considered a valid Timestamp in the request
@ -67,60 +67,34 @@ proc parseTime(input: Option[string]): Result[Option[Timestamp], string] =
if time > 0:
return ok(some(Timestamp(time)))
except ValueError:
return err("Problem parsing time [" & getCurrentExceptionMsg() & "]")
return err("time parsing error: " & getCurrentExceptionMsg())
return ok(none(Timestamp))
# Generates a history query cursor as per the given params
proc parseCursor(
parsedPubsubTopic: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
): Result[Option[HistoryCursor], string] =
# Parse sender time
let parsedSenderTime = parseTime(senderTime)
if not parsedSenderTime.isOk():
return err(parsedSenderTime.error)
proc parseIncludeData(input: Option[string]): Result[bool, string] =
var includeData = false
if input.isSome() and input.get() != "":
try:
includeData = parseBool(input.get())
except ValueError:
return err("include data parsing error: " & getCurrentExceptionMsg())
# Parse store time
let parsedStoreTime = parseTime(storeTime)
if not parsedStoreTime.isOk():
return err(parsedStoreTime.error)
# Parse message digest
let parsedMsgDigest = parseMsgDigest(digest)
if not parsedMsgDigest.isOk():
return err(parsedMsgDigest.error)
# Parse cursor information
if parsedPubsubTopic.isSome() and parsedSenderTime.value.isSome() and
parsedStoreTime.value.isSome() and parsedMsgDigest.value.isSome():
return ok(
some(
HistoryCursor(
pubsubTopic: parsedPubsubTopic.get(),
senderTime: parsedSenderTime.value.get(),
storeTime: parsedStoreTime.value.get(),
digest: parsedMsgDigest.value.get(),
)
)
)
else:
return ok(none(HistoryCursor))
return ok(includeData)
# Creates a HistoryQuery from the given params
proc createHistoryQuery(
proc createStoreQuery(
includeData: Option[string],
pubsubTopic: Option[string],
contentTopics: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
startTime: Option[string],
endTime: Option[string],
pageSize: Option[string],
hashes: Option[string],
cursor: Option[string],
direction: Option[string],
): Result[HistoryQuery, string] =
pageSize: Option[string],
): Result[StoreQueryRequest, string] =
var parsedIncludeData = ?parseIncludeData(includeData)
# Parse pubsubTopic parameter
var parsedPubsubTopic = none(string)
if pubsubTopic.isSome():
@ -136,37 +110,41 @@ proc createHistoryQuery(
for ct in ctList.split(','):
parsedContentTopics.add(ct)
# Parse cursor information
let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest)
# Parse page size field
var parsedPagedSize = DefaultPageSize
if pageSize.isSome() and pageSize.get() != "":
try:
parsedPagedSize = uint64(parseInt(pageSize.get()))
except CatchableError:
return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]")
# Parse start time
let parsedStartTime = ?parseTime(startTime)
# Parse end time
let parsedEndTime = ?parseTime(endTime)
var parsedHashes = ?parseHashes(hashes)
# Parse cursor information
let parsedCursor = ?parseHash(cursor)
# Parse ascending field
var parsedDirection = default()
if direction.isSome() and direction.get() != "":
parsedDirection = direction.get().into()
# Parse page size field
var parsedPagedSize = none(uint64)
if pageSize.isSome() and pageSize.get() != "":
try:
parsedPagedSize = some(uint64(parseInt(pageSize.get())))
except CatchableError:
return err("page size parsing error: " & getCurrentExceptionMsg())
return ok(
HistoryQuery(
StoreQueryRequest(
includeData: parsedIncludeData,
pubsubTopic: parsedPubsubTopic,
contentTopics: parsedContentTopics,
startTime: parsedStartTime,
endTime: parsedEndTime,
direction: parsedDirection,
pageSize: parsedPagedSize,
cursor: parsedCursor,
messageHashes: parsedHashes,
paginationCursor: parsedCursor,
paginationForward: parsedDirection,
paginationLimit: parsedPagedSize,
)
)
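As an informal illustration (placeholder values; parameter names as wired above), a v3 request such as
# /store/v3/messages?includeData=true&pubsubTopic=<topic>&pageSize=5&ascending=true
is mapped by createStoreQuery onto a StoreQueryRequest with includeData = true, paginationLimit = some(5'u64) and paginationForward derived from `ascending` via into().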
@ -179,17 +157,16 @@ proc toOpt(self: Option[Result[string, cstring]]): Option[string] =
return some(self.get().value)
proc retrieveMsgsFromSelfNode(
self: WakuNode, histQuery: HistoryQuery
self: WakuNode, storeQuery: StoreQueryRequest
): Future[RestApiResponse] {.async.} =
## Performs a "store" request to the local node (self node.)
## Notice that this doesn't follow the regular store libp2p channel because a node
## it is not allowed to libp2p-dial a node to itself, by default.
##
let selfResp = (await self.wakuStore.handleSelfStoreRequest(histQuery)).valueOr:
let storeResp = (await self.wakuStore.handleSelfStoreRequest(storeQuery)).valueOr:
return RestApiResponse.internalServerError($error)
let storeResp = selfResp.toStoreResponseRest()
let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr:
const msg = "Error building the json respose"
error msg, error = error
@ -204,51 +181,51 @@ proc installStoreApiHandlers*(
discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler),
) =
# Handles the store-query request according to the passed parameters
router.api(MethodGet, "/store/v1/messages") do(
router.api(MethodGet, "/store/v3/messages") do(
peerAddr: Option[string],
includeData: Option[string],
pubsubTopic: Option[string],
contentTopics: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
startTime: Option[string],
endTime: Option[string],
pageSize: Option[string],
ascending: Option[string]
hashes: Option[string],
cursor: Option[string],
ascending: Option[string],
pageSize: Option[string]
) -> RestApiResponse:
debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr
let peer = peerAddr.toOpt()
debug "REST-GET /store/v3/messages ", peer_addr = $peer
# All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
# Example:
# /store/v3/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic
# Parse the rest of the parameters and create a StoreQueryRequest
let histQuery = createHistoryQuery(
let storeQuery = createStoreQuery(
includeData.toOpt(),
pubsubTopic.toOpt(),
contentTopics.toOpt(),
senderTime.toOpt(),
storeTime.toOpt(),
digest.toOpt(),
startTime.toOpt(),
endTime.toOpt(),
pageSize.toOpt(),
hashes.toOpt(),
cursor.toOpt(),
ascending.toOpt(),
)
pageSize.toOpt(),
).valueOr:
return RestApiResponse.badRequest(error)
if not histQuery.isOk():
return RestApiResponse.badRequest(histQuery.error)
if peerAddr.isNone() and not node.wakuStore.isNil():
if peer.isNone() and not node.wakuStore.isNil():
## The user didn't specify a peer address and self-node is configured as a store node.
## In this case we assume that the user is willing to retrieve the messages stored by
## the local/self store node.
return await node.retrieveMsgsFromSelfNode(histQuery.get())
return await node.retrieveMsgsFromSelfNode(storeQuery)
# Parse the peer address parameter
let parsedPeerAddr = parseUrlPeerAddr(peerAddr.toOpt()).valueOr:
let parsedPeerAddr = parseUrlPeerAddr(peer).valueOr:
return RestApiResponse.badRequest(error)
let peerAddr = parsedPeerAddr.valueOr:
let peerInfo = parsedPeerAddr.valueOr:
node.peerManager.selectPeer(WakuStoreCodec).valueOr:
let handler = discHandler.valueOr:
return NoPeerNoDiscError
@ -261,4 +238,4 @@ proc installStoreApiHandlers*(
"No suitable service peer & none discovered"
)
return await node.performHistoryQuery(histQuery.value, peerAddr)
return await node.performStoreQuery(storeQuery, peerInfo)

View File

@ -4,163 +4,99 @@ else:
{.push raises: [].}
import
std/[sets, strformat, uri],
stew/byteutils,
std/[sets, strformat, uri, options],
stew/[byteutils, arrayops],
chronicles,
json_serialization,
json_serialization/std/options,
presto/[route, client, common]
import
../../../waku_store/common as waku_store_common,
../../../common/base64,
../../../waku_core,
../serdes
import ../../../waku_store/common, ../../../common/base64, ../../../waku_core, ../serdes
#### Types
type
HistoryCursorRest* = object
pubsubTopic*: PubsubTopic
senderTime*: Timestamp
storeTime*: Timestamp
digest*: MessageDigest
StoreRequestRest* = object
# inspired by https://github.com/waku-org/nwaku/blob/f95147f5b7edfd45f914586f2d41cd18fb0e0d18/waku/v2//waku_store/common.nim#L52
pubsubTopic*: Option[PubsubTopic]
contentTopics*: seq[ContentTopic]
cursor*: Option[HistoryCursorRest]
startTime*: Option[Timestamp]
endTime*: Option[Timestamp]
pageSize*: uint64
ascending*: bool
StoreWakuMessage* = object
payload*: Base64String
contentTopic*: Option[ContentTopic]
version*: Option[uint32]
timestamp*: Option[Timestamp]
ephemeral*: Option[bool]
meta*: Option[Base64String]
StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse
messages*: seq[StoreWakuMessage]
cursor*: Option[HistoryCursorRest]
# field that contains error information
errorMessage*: Option[string]
createJsonFlavor RestJson
Json.setWriter JsonWriter, PreferredOutput = string
#### Type conversion
# Converts a URL-encoded-base64 string into a 'MessageDigest'
proc parseMsgDigest*(input: Option[string]): Result[Option[MessageDigest], string] =
proc parseHash*(input: Option[string]): Result[Option[WakuMessageHash], string] =
let base64UrlEncoded =
if input.isSome():
input.get()
else:
return ok(none(WakuMessageHash))
if base64UrlEncoded == "":
return ok(none(WakuMessageHash))
let base64Encoded = decodeUrl(base64UrlEncoded)
let decodedBytes = base64.decode(Base64String(base64Encoded)).valueOr:
return err("waku message hash parsing error: " & error)
let hash: WakuMessageHash = fromBytes(decodedBytes)
return ok(some(hash))
proc parseHashes*(input: Option[string]): Result[seq[WakuMessageHash], string] =
var hashes: seq[WakuMessageHash] = @[]
if not input.isSome() or input.get() == "":
return ok(none(MessageDigest))
return ok(hashes)
let decodedUrl = decodeUrl(input.get())
let base64Decoded = base64.decode(Base64String(decodedUrl))
var messageDigest = MessageDigest()
if not base64Decoded.isOk():
return err(base64Decoded.error)
if decodedUrl != "":
for subString in decodedUrl.split(','):
let hash = ?parseHash(some(subString))
let base64DecodedArr = base64Decoded.get()
# Next snippet inspired by "nwaku/waku/waku_archive/archive.nim"
# TODO: Improve coherence of MessageDigest type
messageDigest = block:
var data: array[32, byte]
for i in 0 ..< min(base64DecodedArr.len, 32):
data[i] = base64DecodedArr[i]
if hash.isSome():
hashes.add(hash.get())
MessageDigest(data: data)
return ok(some(messageDigest))
return ok(hashes)
# Converts a given MessageDigest object into a suitable
# Base64-URL-encoded string suitable to be transmitted in a Rest
# request-response. The MessageDigest is first base64 encoded
# and this result is URL-encoded.
proc toRestStringMessageDigest*(self: MessageDigest): string =
let base64Encoded = base64.encode(self.data)
proc toRestStringWakuMessageHash*(self: WakuMessageHash): string =
let base64Encoded = base64.encode(self)
encodeUrl($base64Encoded)
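# Round-trip sketch (hedged): encoding a hash and parsing it back should be
# lossless, since parseHash reverses the URL-decode + base64-decode steps:
#   let restStr = toRestStringWakuMessageHash(msgHash)
#   doAssert parseHash(some(restStr)) == ok(some(msgHash))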
proc toWakuMessage*(message: StoreWakuMessage): WakuMessage =
WakuMessage(
payload: base64.decode(message.payload).get(),
contentTopic: message.contentTopic.get(),
version: message.version.get(),
timestamp: message.timestamp.get(),
ephemeral: message.ephemeral.get(),
meta: message.meta.get(Base64String("")).decode().get(),
)
# Converts a 'HistoryResponse' object to a 'StoreResponseRest'
# that can be serialized to a JSON object.
proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest =
proc toStoreWakuMessage(message: WakuMessage): StoreWakuMessage =
StoreWakuMessage(
payload: base64.encode(message.payload),
contentTopic: some(message.contentTopic),
version: some(message.version),
timestamp: some(message.timestamp),
ephemeral: some(message.ephemeral),
meta:
if message.meta.len > 0:
some(base64.encode(message.meta))
else:
none(Base64String)
,
)
var storeWakuMsgs: seq[StoreWakuMessage]
for m in histResp.messages:
storeWakuMsgs.add(m.toStoreWakuMessage())
var cursor = none(HistoryCursorRest)
if histResp.cursor.isSome:
cursor = some(
HistoryCursorRest(
pubsubTopic: histResp.cursor.get().pubsubTopic,
senderTime: histResp.cursor.get().senderTime,
storeTime: histResp.cursor.get().storeTime,
digest: histResp.cursor.get().digest,
)
)
StoreResponseRest(messages: storeWakuMsgs, cursor: cursor)
## Beginning of StoreWakuMessage serde
## WakuMessage serde
proc writeValue*(
writer: var JsonWriter, value: StoreWakuMessage
writer: var JsonWriter, msg: WakuMessage
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("payload", $value.payload)
if value.contentTopic.isSome():
writer.writeField("contentTopic", value.contentTopic.get())
if value.version.isSome():
writer.writeField("version", value.version.get())
if value.timestamp.isSome():
writer.writeField("timestamp", value.timestamp.get())
if value.ephemeral.isSome():
writer.writeField("ephemeral", value.ephemeral.get())
if value.meta.isSome():
writer.writeField("meta", value.meta.get())
writer.writeField("payload", base64.encode(msg.payload))
writer.writeField("contentTopic", msg.contentTopic)
if msg.meta.len > 0:
writer.writeField("meta", base64.encode(msg.meta))
writer.writeField("version", msg.version)
writer.writeField("timestamp", msg.timestamp)
writer.writeField("ephemeral", msg.ephemeral)
if msg.proof.len > 0:
writer.writeField("proof", base64.encode(msg.proof))
writer.endRecord()
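# Illustrative JSON produced by the writer above (fields in write order;
# meta and proof appear only when non-empty; values here are made up):
#   {"payload":"aGk=","contentTopic":"/app/1/chat/proto","version":0,
#    "timestamp":1712345678000000000,"ephemeral":false}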
proc readValue*(
reader: var JsonReader, value: var StoreWakuMessage
reader: var JsonReader, value: var WakuMessage
) {.gcsafe, raises: [SerializationError, IOError].} =
var
payload = none(Base64String)
contentTopic = none(ContentTopic)
version = none(uint32)
timestamp = none(Timestamp)
ephemeral = none(bool)
meta = none(Base64String)
payload: seq[byte]
contentTopic: ContentTopic
version: uint32
timestamp: Timestamp
ephemeral: bool
meta: seq[byte]
proof: seq[byte]
var keys = initHashSet[string]()
for fieldName in readObjectFields(reader):
@ -171,49 +107,56 @@ proc readValue*(
fmt"Multiple `{fieldName}` fields found"
except CatchableError:
"Multiple fields with the same name found"
reader.raiseUnexpectedField(err, "StoreWakuMessage")
reader.raiseUnexpectedField(err, "WakuMessage")
case fieldName
of "payload":
payload = some(reader.readValue(Base64String))
let base64String = reader.readValue(Base64String)
payload = base64.decode(base64String).valueOr:
reader.raiseUnexpectedField("Failed decoding data", "payload")
of "contentTopic":
contentTopic = some(reader.readValue(ContentTopic))
contentTopic = reader.readValue(ContentTopic)
of "version":
version = some(reader.readValue(uint32))
version = reader.readValue(uint32)
of "timestamp":
timestamp = some(reader.readValue(Timestamp))
timestamp = reader.readValue(Timestamp)
of "ephemeral":
ephemeral = some(reader.readValue(bool))
ephemeral = reader.readValue(bool)
of "meta":
meta = some(reader.readValue(Base64String))
let base64String = reader.readValue(Base64String)
meta = base64.decode(base64String).valueOr:
reader.raiseUnexpectedField("Failed decoding data", "meta")
of "proof":
let base64String = reader.readValue(Base64String)
proof = base64.decode(base64String).valueOr:
reader.raiseUnexpectedField("Failed decoding data", "proof")
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if payload.isNone():
if payload.len == 0:
reader.raiseUnexpectedValue("Field `payload` is missing")
value = StoreWakuMessage(
payload: payload.get(),
value = WakuMessage(
payload: payload,
contentTopic: contentTopic,
version: version,
timestamp: timestamp,
ephemeral: ephemeral,
meta: meta,
proof: proof,
)
## End of StoreWakuMessage serde
## Beginning of MessageDigest serde
## WakuMessageHash serde
proc writeValue*(
writer: var JsonWriter, value: MessageDigest
writer: var JsonWriter, value: WakuMessageHash
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("data", base64.encode(value.data))
writer.writeField("data", base64.encode(value))
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var MessageDigest
reader: var JsonReader, value: var WakuMessageHash
) {.gcsafe, raises: [SerializationError, IOError].} =
var data = none(seq[byte])
@ -221,10 +164,10 @@ proc readValue*(
case fieldName
of "data":
if data.isSome():
reader.raiseUnexpectedField("Multiple `data` fields found", "MessageDigest")
reader.raiseUnexpectedField("Multiple `data` fields found", "WakuMessageHash")
let decoded = base64.decode(reader.readValue(Base64String))
if not decoded.isOk():
reader.raiseUnexpectedField("Failed decoding data", "MessageDigest")
reader.raiseUnexpectedField("Failed decoding data", "WakuMessageHash")
data = some(decoded.get())
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
@ -233,149 +176,165 @@ proc readValue*(
reader.raiseUnexpectedValue("Field `data` is missing")
for i in 0 ..< 32:
value.data[i] = data.get()[i]
value[i] = data.get()[i]
## End of MessageDigest serde
## Beginning of HistoryCursorRest serde
## WakuMessageKeyValue serde
proc writeValue*(
writer: var JsonWriter, value: HistoryCursorRest
writer: var JsonWriter, value: WakuMessageKeyValue
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("pubsub_topic", value.pubsubTopic)
writer.writeField("sender_time", value.senderTime)
writer.writeField("store_time", value.storeTime)
writer.writeField("digest", value.digest)
writer.writeField("message_hash", value.messageHash)
writer.writeField("message", value.message)
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var HistoryCursorRest
reader: var JsonReader, value: var WakuMessageKeyValue
) {.gcsafe, raises: [SerializationError, IOError].} =
var
pubsubTopic = none(PubsubTopic)
senderTime = none(Timestamp)
storeTime = none(Timestamp)
digest = none(MessageDigest)
messageHash = none(WakuMessageHash)
message = none(WakuMessage)
for fieldName in readObjectFields(reader):
case fieldName
of "pubsub_topic":
if pubsubTopic.isSome():
of "message_hash":
if messageHash.isSome():
reader.raiseUnexpectedField(
"Multiple `pubsub_topic` fields found", "HistoryCursorRest"
"Multiple `message_hash` fields found", "WakuMessageKeyValue"
)
pubsubTopic = some(reader.readValue(PubsubTopic))
of "sender_time":
if senderTime.isSome():
messageHash = some(reader.readValue(WakuMessageHash))
of "message":
if message.isSome():
reader.raiseUnexpectedField(
"Multiple `sender_time` fields found", "HistoryCursorRest"
"Multiple `message` fields found", "WakuMessageKeyValue"
)
senderTime = some(reader.readValue(Timestamp))
of "store_time":
if storeTime.isSome():
reader.raiseUnexpectedField(
"Multiple `store_time` fields found", "HistoryCursorRest"
)
storeTime = some(reader.readValue(Timestamp))
of "digest":
if digest.isSome():
reader.raiseUnexpectedField(
"Multiple `digest` fields found", "HistoryCursorRest"
)
digest = some(reader.readValue(MessageDigest))
message = some(reader.readValue(WakuMessage))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if pubsubTopic.isNone():
reader.raiseUnexpectedValue("Field `pubsub_topic` is missing")
if messageHash.isNone():
reader.raiseUnexpectedValue("Field `message_hash` is missing")
if senderTime.isNone():
reader.raiseUnexpectedValue("Field `sender_time` is missing")
if message.isNone():
reader.raiseUnexpectedValue("Field `message` is missing")
if storeTime.isNone():
reader.raiseUnexpectedValue("Field `store_time` is missing")
value = WakuMessageKeyValue(messageHash: messageHash.get(), message: message.get())
if digest.isNone():
reader.raiseUnexpectedValue("Field `digest` is missing")
value = HistoryCursorRest(
pubsubTopic: pubsubTopic.get(),
senderTime: senderTime.get(),
storeTime: storeTime.get(),
digest: digest.get(),
)
## End of HistoryCursorRest serde
## Beginning of StoreResponseRest serde
## StoreQueryResponse serde
proc writeValue*(
writer: var JsonWriter, value: StoreResponseRest
writer: var JsonWriter, value: StoreQueryResponse
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("request_id", value.requestId)
writer.writeField("status_code", value.statusCode)
writer.writeField("status_desc", value.statusDesc)
writer.writeField("messages", value.messages)
if value.cursor.isSome():
writer.writeField("cursor", value.cursor.get())
if value.errorMessage.isSome():
writer.writeField("error_message", value.errorMessage.get())
if value.paginationCursor.isSome():
writer.writeField("pagination_cursor", value.paginationCursor.get())
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var StoreResponseRest
reader: var JsonReader, value: var StoreQueryResponse
) {.gcsafe, raises: [SerializationError, IOError].} =
var
messages = none(seq[StoreWakuMessage])
cursor = none(HistoryCursorRest)
errorMessage = none(string)
requestId = none(string)
code = none(uint32)
desc = none(string)
messages = none(seq[WakuMessageKeyValue])
cursor = none(WakuMessageHash)
for fieldName in readObjectFields(reader):
case fieldName
of "request_id":
if requestId.isSome():
reader.raiseUnexpectedField(
"Multiple `request_id` fields found", "StoreQueryResponse"
)
requestId = some(reader.readValue(string))
of "status_code":
if code.isSome():
reader.raiseUnexpectedField(
"Multiple `status_code` fields found", "StoreQueryResponse"
)
code = some(reader.readValue(uint32))
of "status_desc":
if desc.isSome():
reader.raiseUnexpectedField(
"Multiple `status_desc` fields found", "StoreQueryResponse"
)
desc = some(reader.readValue(string))
of "messages":
if messages.isSome():
reader.raiseUnexpectedField(
"Multiple `messages` fields found", "StoreResponseRest"
"Multiple `messages` fields found", "StoreQueryResponse"
)
messages = some(reader.readValue(seq[StoreWakuMessage]))
of "cursor":
messages = some(reader.readValue(seq[WakuMessageKeyValue]))
of "pagination_cursor":
if cursor.isSome():
reader.raiseUnexpectedField(
"Multiple `cursor` fields found", "StoreResponseRest"
"Multiple `pagination_cursor` fields found", "StoreQueryResponse"
)
cursor = some(reader.readValue(HistoryCursorRest))
of "error_message":
if errorMessage.isSome():
reader.raiseUnexpectedField(
"Multiple `error_message` fields found", "StoreResponseRest"
)
errorMessage = some(reader.readValue(string))
cursor = some(reader.readValue(WakuMessageHash))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if requestId.isNone():
reader.raiseUnexpectedValue("Field `request_id` is missing")
if code.isNone():
reader.raiseUnexpectedValue("Field `status_code` is missing")
if desc.isNone():
reader.raiseUnexpectedValue("Field `status_desc` is missing")
if messages.isNone():
reader.raiseUnexpectedValue("Field `messages` is missing")
value = StoreResponseRest(
messages: messages.get(), cursor: cursor, errorMessage: errorMessage
value = StoreQueryResponse(
requestId: requestId.get(),
statusCode: code.get(),
statusDesc: desc.get(),
messages: messages.get(),
paginationCursor: cursor,
)
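# Illustrative JSON accepted/produced by this serde (hedged example; the
# message body is elided, and pagination_cursor is present only when the
# server has another page):
#   {"request_id":"aa-01","status_code":200,"status_desc":"OK",
#    "messages":[{"message_hash":{"data":"<base64>"},"message":{...}}],
#    "pagination_cursor":{"data":"<base64>"}}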
## End of StoreResponseRest serde
## Beginning of StoreRequestRest serde
## StoreQueryRequest serde
proc writeValue*(
writer: var JsonWriter, value: StoreRequestRest
writer: var JsonWriter, req: StoreQueryRequest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
if value.pubsubTopic.isSome():
writer.writeField("pubsub_topic", value.pubsubTopic.get())
writer.writeField("content_topics", value.contentTopics)
if value.startTime.isSome():
writer.writeField("start_time", value.startTime.get())
if value.endTime.isSome():
writer.writeField("end_time", value.endTime.get())
writer.writeField("page_size", value.pageSize)
writer.writeField("ascending", value.ascending)
writer.endRecord()
## End of StoreRequestRest serde
writer.writeField("request_id", req.requestId)
writer.writeField("include_data", req.includeData)
if req.pubsubTopic.isSome():
writer.writeField("pubsub_topic", req.pubsubTopic.get())
writer.writeField("content_topics", req.contentTopics)
if req.startTime.isSome():
writer.writeField("start_time", req.startTime.get())
if req.endTime.isSome():
writer.writeField("end_time", req.endTime.get())
writer.writeField("message_hashes", req.messageHashes)
if req.paginationCursor.isSome():
writer.writeField("pagination_cursor", req.paginationCursor.get())
writer.writeField("pagination_forward", req.paginationForward)
if req.paginationLimit.isSome():
writer.writeField("pagination_limit", req.paginationLimit.get())
writer.endRecord()
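# Illustrative request JSON (hedged; optional fields omitted when unset and
# the PagingDirection enum encoding is assumed):
#   {"request_id":"aa-01","include_data":true,"pubsub_topic":"/waku/2/rs/0/0",
#    "content_topics":["/app/1/chat/proto"],"message_hashes":[],
#    "pagination_forward":"FORWARD","pagination_limit":20}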

View File

@ -193,6 +193,74 @@ proc findMessages*(
return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor))
proc findMessagesV2*(
self: WakuArchive, query: ArchiveQuery
): Future[ArchiveResult] {.async, gcsafe.} =
## Search the archive to return a single page of messages matching the query criteria
let maxPageSize =
if query.pageSize <= 0:
DefaultPageSize
else:
min(query.pageSize, MaxPageSize)
let isAscendingOrder = query.direction.into()
if query.contentTopics.len > 10:
return err(ArchiveError.invalidQuery("too many content topics"))
let queryStartTime = getTime().toUnixFloat()
let rows = (
await self.driver.getMessagesV2(
contentTopic = query.contentTopics,
pubsubTopic = query.pubsubTopic,
cursor = query.cursor,
startTime = query.startTime,
endTime = query.endTime,
maxPageSize = maxPageSize + 1,
ascendingOrder = isAscendingOrder,
)
).valueOr:
return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))
let queryDuration = getTime().toUnixFloat() - queryStartTime
waku_archive_query_duration_seconds.observe(queryDuration)
var messages = newSeq[WakuMessage]()
var cursor = none(ArchiveCursor)
if rows.len == 0:
return ok(ArchiveResponse(messages: messages, cursor: cursor))
## Messages
let pageSize = min(rows.len, int(maxPageSize))
messages = rows[0 ..< pageSize].mapIt(it[1])
## Cursor
if rows.len > int(maxPageSize):
## Build last message cursor
## The cursor is built from the last message INCLUDED in the response
## (i.e. the second last message in the rows list)
let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2]
cursor = some(
ArchiveCursor(
digest: MessageDigest.fromBytes(digest),
storeTime: storeTimestamp,
senderTime: message.timestamp,
pubsubTopic: pubsubTopic,
)
)
# All messages MUST be returned in chronological order
if not isAscendingOrder:
reverse(messages)
return ok(ArchiveResponse(messages: messages, cursor: cursor))
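# The look-ahead pagination used above, in miniature (hedged sketch; fetch
# and cursorFrom are hypothetical): request one row more than the page size,
# and treat its presence as "a next page exists".
#   let rows = fetch(limit = maxPageSize + 1)
#   if rows.len > int(maxPageSize):
#     let page = rows[0 ..< int(maxPageSize)]
#     cursor = some(cursorFrom(page[^1])) # == rows[^2], last row included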
proc periodicRetentionPolicy(self: WakuArchive) {.async.} =
debug "executing message retention policy"

View File

@ -32,6 +32,18 @@ method getAllMessages*(
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
discard
method getMessagesV2*(
driver: ArchiveDriver,
contentTopic = newSeq[ContentTopic](0),
pubsubTopic = none(PubsubTopic),
cursor = none(ArchiveCursor),
startTime = none(Timestamp),
endTime = none(Timestamp),
maxPageSize = DefaultPageSize,
ascendingOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
discard
method getMessages*(
driver: ArchiveDriver,
contentTopic = newSeq[ContentTopic](0),

View File

@ -35,6 +35,48 @@ const InsertRowStmtDefinition = # TODO: get the sql queries from a file
const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc"
const SelectNoCursorAscStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
storedAt >= $4 AND
storedAt <= $5
ORDER BY storedAt ASC, messageHash ASC LIMIT $6;"""
const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc"
const SelectNoCursorDescStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
storedAt >= $4 AND
storedAt <= $5
ORDER BY storedAt DESC, messageHash DESC LIMIT $6;"""
const SelectWithCursorDescStmtName = "SelectWithCursorDesc"
const SelectWithCursorDescStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
(storedAt, messageHash) < ($4,$5) AND
storedAt >= $6 AND
storedAt <= $7
ORDER BY storedAt DESC, messageHash DESC LIMIT $8;"""
const SelectWithCursorAscStmtName = "SelectWithCursorAsc"
const SelectWithCursorAscStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
(storedAt, messageHash) > ($4,$5) AND
storedAt >= $6 AND
storedAt <= $7
ORDER BY storedAt ASC, messageHash ASC LIMIT $8;"""
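# Note on the cursor statements above: the row-value comparison
# (storedAt, messageHash) > ($4,$5) together with the matching composite
# ORDER BY implements keyset pagination, giving a stable page boundary even
# when several rows share the same storedAt value.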
const SelectNoCursorV2AscStmtName = "SelectWithoutCursorV2Asc"
const SelectNoCursorV2AscStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
@ -42,8 +84,8 @@ const SelectNoCursorAscStmtDef =
storedAt <= $4
ORDER BY storedAt ASC LIMIT $5;"""
const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc"
const SelectNoCursorDescStmtDef =
const SelectNoCursorV2DescStmtName = "SelectWithoutCursorV2Desc"
const SelectNoCursorV2DescStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
@ -51,8 +93,8 @@ const SelectNoCursorDescStmtDef =
storedAt <= $4
ORDER BY storedAt DESC LIMIT $5;"""
const SelectWithCursorDescStmtName = "SelectWithCursorDesc"
const SelectWithCursorDescStmtDef =
const SelectWithCursorV2DescStmtName = "SelectWithCursorV2Desc"
const SelectWithCursorV2DescStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
@ -61,8 +103,8 @@ const SelectWithCursorDescStmtDef =
storedAt <= $6
ORDER BY storedAt DESC LIMIT $7;"""
const SelectWithCursorAscStmtName = "SelectWithCursorAsc"
const SelectWithCursorAscStmtDef =
const SelectWithCursorV2AscStmtName = "SelectWithCursorV2Asc"
const SelectWithCursorV2AscStmtDef =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
@ -289,6 +331,70 @@ proc getMessagesArbitraryQuery(
statements.add("pubsubTopic = ?")
args.add(pubsubTopic.get())
if cursor.isSome():
let comp = if ascendingOrder: ">" else: "<"
statements.add("(storedAt, messageHash) " & comp & " (?,?)")
args.add($cursor.get().storeTime)
args.add(toHex(cursor.get().hash))
if startTime.isSome():
statements.add("storedAt >= ?")
args.add($startTime.get())
if endTime.isSome():
statements.add("storedAt <= ?")
args.add($endTime.get())
if statements.len > 0:
query &= " WHERE " & statements.join(" AND ")
var direction: string
if ascendingOrder:
direction = "ASC"
else:
direction = "DESC"
query &= " ORDER BY storedAt " & direction & ", messageHash " & direction
query &= " LIMIT ?"
args.add($maxPageSize)
var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
proc rowCallback(pqResult: ptr PGresult) =
rowCallbackImpl(pqResult, rows)
(await s.readConnPool.pgQuery(query, args, rowCallback)).isOkOr:
return err("failed to run query: " & $error)
return ok(rows)
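# For a typical filtered call the assembled SQL looks roughly like this
# (illustrative; '?' placeholders are bound from `args` in order):
#   SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp,
#          id, messageHash FROM messages
#   WHERE contentTopic IN (?) AND pubsubTopic = ? AND storedAt >= ? AND
#         storedAt <= ? ORDER BY storedAt DESC, messageHash DESC LIMIT ?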
proc getMessagesV2ArbitraryQuery(
s: PostgresDriver,
contentTopic: seq[ContentTopic] = @[],
pubsubTopic = none(PubsubTopic),
cursor = none(ArchiveCursor),
startTime = none(Timestamp),
endTime = none(Timestamp),
maxPageSize = DefaultPageSize,
ascendingOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
## This proc handles atypical queries; we don't use prepared statements for those.
var query =
"""SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages"""
var statements: seq[string]
var args: seq[string]
if contentTopic.len > 0:
let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")"
statements.add(cstmt)
for t in contentTopic:
args.add(t)
if pubsubTopic.isSome():
statements.add("pubsubTopic = ?")
args.add(pubsubTopic.get())
if cursor.isSome():
let comp = if ascendingOrder: ">" else: "<"
statements.add("(storedAt, id) " & comp & " (?,?)")
@ -333,6 +439,7 @@ proc getMessagesPreparedStmt(
cursor = none(ArchiveCursor),
startTime: Timestamp,
endTime: Timestamp,
hashes: string,
maxPageSize = DefaultPageSize,
ascOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
@ -355,6 +462,85 @@ proc getMessagesPreparedStmt(
var stmtDef =
if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef
let hash = toHex(cursor.get().hash)
let storeTime = $cursor.get().storeTime
(
await s.readConnPool.runStmt(
stmtName,
stmtDef,
@[
contentTopic, hashes, pubsubTopic, storeTime, hash, startTimeStr, endTimeStr,
limit,
],
@[
int32(contentTopic.len),
int32(pubsubTopic.len),
int32(storeTime.len),
int32(hash.len),
int32(startTimeStr.len),
int32(endTimeStr.len),
int32(limit.len),
],
@[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
rowCallback,
)
).isOkOr:
return err("failed to run query with cursor: " & $error)
else:
var stmtName =
if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName
var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef
(
await s.readConnPool.runStmt(
stmtName,
stmtDef,
@[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit],
@[
int32(contentTopic.len),
int32(pubsubTopic.len),
int32(startTimeStr.len),
int32(endTimeStr.len),
int32(limit.len),
],
@[int32(0), int32(0), int32(0), int32(0), int32(0)],
rowCallback,
)
).isOkOr:
return err("failed to run query without cursor: " & $error)
return ok(rows)
proc getMessagesV2PreparedStmt(
s: PostgresDriver,
contentTopic: string,
pubsubTopic: PubsubTopic,
cursor = none(ArchiveCursor),
startTime: Timestamp,
endTime: Timestamp,
maxPageSize = DefaultPageSize,
ascOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
## This proc aims to run the most typical queries in a more performant way, i.e. by means of
## prepared statements.
##
## contentTopic - string with a list of content topics, e.g.: "'ctopic1','ctopic2','ctopic3'"
var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
proc rowCallback(pqResult: ptr PGresult) =
rowCallbackImpl(pqResult, rows)
let startTimeStr = $startTime
let endTimeStr = $endTime
let limit = $maxPageSize
if cursor.isSome():
var stmtName =
if ascOrder: SelectWithCursorV2AscStmtName else: SelectWithCursorV2DescStmtName
var stmtDef =
if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef
let digest = toHex(cursor.get().digest.data)
let storeTime = $cursor.get().storeTime
@ -379,8 +565,9 @@ proc getMessagesPreparedStmt(
return err("failed to run query with cursor: " & $error)
else:
var stmtName =
if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName
var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef
if ascOrder: SelectNoCursorV2AscStmtName else: SelectNoCursorV2DescStmtName
var stmtDef =
if ascOrder: SelectNoCursorV2AscStmtDef else: SelectNoCursorV2DescStmtDef
(
await s.readConnPool.runStmt(
@ -415,10 +602,40 @@ method getMessages*(
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
let hexHashes = hashes.mapIt(toHex(it))
if contentTopicSeq.len == 1 and hexHashes.len == 1 and pubsubTopic.isSome() and
startTime.isSome() and endTime.isSome():
## Considered the most common query. Therefore, we use prepared statements to optimize it.
return await s.getMessagesPreparedStmt(
contentTopicSeq.join(","),
PubsubTopic(pubsubTopic.get()),
cursor,
startTime.get(),
endTime.get(),
hexHashes.join(","),
maxPageSize,
ascendingOrder,
)
else:
## We will run an atypical query. In this case we don't use prepared statements
return await s.getMessagesArbitraryQuery(
contentTopicSeq, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
ascendingOrder,
)
method getMessagesV2*(
s: PostgresDriver,
contentTopicSeq = newSeq[ContentTopic](0),
pubsubTopic = none(PubsubTopic),
cursor = none(ArchiveCursor),
startTime = none(Timestamp),
endTime = none(Timestamp),
maxPageSize = DefaultPageSize,
ascendingOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and
endTime.isSome():
## Considered the most common query. Therefore, we use prepared statements to optimize it.
return await s.getMessagesPreparedStmt(
return await s.getMessagesV2PreparedStmt(
contentTopicSeq.join(","),
PubsubTopic(pubsubTopic.get()),
cursor,
@ -429,8 +646,8 @@ method getMessages*(
)
else:
## We will run an atypical query. In this case we don't use prepared statements
return await s.getMessagesArbitraryQuery(
contentTopicSeq, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
return await s.getMessagesV2ArbitraryQuery(
contentTopicSeq, pubsubTopic, cursor, startTime, endTime, maxPageSize,
ascendingOrder,
)

View File

@ -52,8 +52,10 @@ proc toIndex*(index: ArchiveCursor): Index =
proc `==`*(x, y: Index): bool =
## receiverTime plays no role in index equality
return
(x.senderTime == y.senderTime) and (x.digest == y.digest) and
(x.pubsubTopic == y.pubsubTopic)
(
(x.senderTime == y.senderTime) and (x.digest == y.digest) and
(x.pubsubTopic == y.pubsubTopic)
) or (x.hash == y.hash) # this applies to store v3 queries only
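# Hedged example of the v3 branch (someHash is any shared WakuMessageHash):
#   let a = Index(senderTime: Timestamp(1), hash: someHash)
#   let b = Index(senderTime: Timestamp(2), hash: someHash)
#   doAssert a == b # senderTime differs, but the hashes match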
proc cmp*(x, y: Index): int =
## compares x and y

View File

@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[options, sequtils], stew/[results, byteutils, arrayops], sqlite3_abi
import std/[options, sequtils], stew/[results, byteutils], sqlite3_abi
import
../../../common/databases/db_sqlite,
../../../common/databases/common,
@ -285,21 +285,229 @@ proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
where &= " AND " & clause
return some(where)
proc whereClause(
proc whereClausev2(
cursor: bool,
pubsubTopic: Option[PubsubTopic],
contentTopic: seq[ContentTopic],
startTime: Option[Timestamp],
endTime: Option[Timestamp],
ascending: bool,
): Option[string] =
let cursorClause =
if cursor:
let comp = if ascending: ">" else: "<"
some("(storedAt, id) " & comp & " (?, ?)")
else:
none(string)
let pubsubTopicClause =
if pubsubTopic.isNone():
none(string)
else:
some("pubsubTopic = (?)")
let contentTopicClause =
if contentTopic.len <= 0:
none(string)
else:
var where = "contentTopic IN ("
where &= "?"
for _ in 1 ..< contentTopic.len:
where &= ", ?"
where &= ")"
some(where)
let startTimeClause =
if startTime.isNone():
none(string)
else:
some("storedAt >= (?)")
let endTimeClause =
if endTime.isNone():
none(string)
else:
some("storedAt <= (?)")
return combineClauses(
cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause
)
proc selectMessagesWithLimitQueryv2(
table: string, where: Option[string], limit: uint, ascending = true, v3 = false
): SqlQueryStr =
let order = if ascending: "ASC" else: "DESC"
var query: string
query =
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash"
query &= " FROM " & table
if where.isSome():
query &= " WHERE " & where.get()
query &= " ORDER BY storedAt " & order & ", id " & order
query &= " LIMIT " & $limit & ";"
return query
proc prepareStmt(
db: SqliteDatabase, stmt: string
): DatabaseResult[SqliteStmt[void, void]] =
var s: RawStmtPtr
checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
return ok(SqliteStmt[void, void](s))
proc execSelectMessagesV2WithLimitStmt(
s: SqliteStmt,
cursor: Option[DbCursor],
pubsubTopic: Option[PubsubTopic],
contentTopic: seq[ContentTopic],
startTime: Option[Timestamp],
endTime: Option[Timestamp],
onRowCallback: DataProc,
): DatabaseResult[void] =
let s = RawStmtPtr(s)
# Bind params
var paramIndex = 1
if cursor.isSome():
let (storedAt, id, _) = cursor.get()
checkErr bindParam(s, paramIndex, storedAt)
paramIndex += 1
checkErr bindParam(s, paramIndex, id)
paramIndex += 1
if pubsubTopic.isSome():
let pubsubTopic = toBytes(pubsubTopic.get())
checkErr bindParam(s, paramIndex, pubsubTopic)
paramIndex += 1
for topic in contentTopic:
checkErr bindParam(s, paramIndex, topic.toBytes())
paramIndex += 1
if startTime.isSome():
let time = startTime.get()
checkErr bindParam(s, paramIndex, time)
paramIndex += 1
if endTime.isSome():
let time = endTime.get()
checkErr bindParam(s, paramIndex, time)
paramIndex += 1
try:
while true:
let v = sqlite3_step(s)
case v
of SQLITE_ROW:
onRowCallback(s)
of SQLITE_DONE:
return ok()
else:
return err($sqlite3_errstr(v))
finally:
# release implicit transaction
discard sqlite3_reset(s) # same return information as step
discard sqlite3_clear_bindings(s) # no errors possible
proc execSelectMessageByHash(
s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc
): DatabaseResult[void] =
let s = RawStmtPtr(s)
checkErr bindParam(s, 1, toSeq(hash))
try:
while true:
let v = sqlite3_step(s)
case v
of SQLITE_ROW:
onRowCallback(s)
of SQLITE_DONE:
return ok()
else:
return err($sqlite3_errstr(v))
finally:
# release implicit transaction
discard sqlite3_reset(s) # same return information as step
discard sqlite3_clear_bindings(s) # no errors possible
proc selectMessagesByHistoryQueryWithLimit*(
db: SqliteDatabase,
contentTopic: seq[ContentTopic],
pubsubTopic: Option[PubsubTopic],
cursor: Option[DbCursor],
startTime: Option[Timestamp],
endTime: Option[Timestamp],
limit: uint,
ascending: bool,
): DatabaseResult[
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
] =
var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
@[]
proc queryRowCallback(s: ptr sqlite3_stmt) =
let
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
message = queryRowWakuMessageCallback(
s, contentTopicCol = 1, payloadCol = 2, versionCol = 4, senderTimestampCol = 5
)
digest = queryRowDigestCallback(s, digestCol = 6)
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
messages.add((pubsubTopic, message, digest, storedAt, hash))
let query = block:
let where = whereClausev2(
cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending
)
selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending)
let dbStmt = ?db.prepareStmt(query)
?dbStmt.execSelectMessagesV2WithLimitStmt(
cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback
)
dbStmt.dispose()
return ok(messages)
### Store v3 ###
proc selectMessageByHashQuery(): SqlQueryStr =
var query: string
query = "SELECT contentTopic, payload, version, timestamp, messageHash"
query &= " FROM " & DbTable
query &= " WHERE messageHash = (?)"
return query
proc whereClause(
cursor: bool,
pubsubTopic: Option[PubsubTopic],
contentTopic: seq[ContentTopic],
startTime: Option[Timestamp],
endTime: Option[Timestamp],
hashes: seq[WakuMessageHash],
ascending: bool,
): Option[string] =
let cursorClause =
if cursor.isNone():
none(string)
else:
if cursor:
let comp = if ascending: ">" else: "<"
some("(storedAt, id) " & comp & " (?, ?)")
some("(timestamp, messageHash) " & comp & " (?, ?)")
else:
none(string)
let pubsubTopicClause =
if pubsubTopic.isNone():
@ -346,35 +554,9 @@ proc whereClause(
hashesClause,
)
proc selectMessagesWithLimitQuery(
table: string, where: Option[string], limit: uint, ascending = true
): SqlQueryStr =
let order = if ascending: "ASC" else: "DESC"
var query: string
query =
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash"
query &= " FROM " & table
if where.isSome():
query &= " WHERE " & where.get()
query &= " ORDER BY storedAt " & order & ", id " & order
query &= " LIMIT " & $limit & ";"
return query
proc prepareSelectMessagesWithlimitStmt(
db: SqliteDatabase, stmt: string
): DatabaseResult[SqliteStmt[void, void]] =
var s: RawStmtPtr
checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
return ok(SqliteStmt[void, void](s))
proc execSelectMessagesWithLimitStmt(
s: SqliteStmt,
cursor: Option[DbCursor],
cursor: Option[(Timestamp, WakuMessageHash)],
pubsubTopic: Option[PubsubTopic],
contentTopic: seq[ContentTopic],
startTime: Option[Timestamp],
@ -387,11 +569,11 @@ proc execSelectMessagesWithLimitStmt(
# Bind params
var paramIndex = 1
if cursor.isSome(): # cursor = storedAt, id, pubsubTopic
let (storedAt, id, _) = cursor.get()
checkErr bindParam(s, paramIndex, storedAt)
if cursor.isSome():
let (time, hash) = cursor.get()
checkErr bindParam(s, paramIndex, time)
paramIndex += 1
checkErr bindParam(s, paramIndex, id)
checkErr bindParam(s, paramIndex, toSeq(hash))
paramIndex += 1
if pubsubTopic.isSome():
@ -404,13 +586,7 @@ proc execSelectMessagesWithLimitStmt(
paramIndex += 1
for hash in hashes:
let bytes: array[32, byte] = hash
var byteSeq: seq[byte]
let byteCount = copyFrom(byteSeq, bytes)
assert byteCount == 32
checkErr bindParam(s, paramIndex, byteSeq)
checkErr bindParam(s, paramIndex, toSeq(hash))
paramIndex += 1
if startTime.isSome():
@ -438,11 +614,31 @@ proc execSelectMessagesWithLimitStmt(
discard sqlite3_reset(s) # same return information as step
discard sqlite3_clear_bindings(s) # no errors possible
proc selectMessagesByHistoryQueryWithLimit*(
proc selectMessagesWithLimitQuery(
table: string, where: Option[string], limit: uint, ascending = true, v3 = false
): SqlQueryStr =
let order = if ascending: "ASC" else: "DESC"
var query: string
query =
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash"
query &= " FROM " & table
if where.isSome():
query &= " WHERE " & where.get()
query &= " ORDER BY storedAt " & order & ", messageHash " & order
query &= " LIMIT " & $limit & ";"
return query
proc selectMessagesByStoreQueryWithLimit*(
db: SqliteDatabase,
contentTopic: seq[ContentTopic],
pubsubTopic: Option[PubsubTopic],
cursor: Option[DbCursor],
cursor: Option[WakuMessageHash],
startTime: Option[Timestamp],
endTime: Option[Timestamp],
hashes: seq[WakuMessageHash],
@ -451,8 +647,32 @@ proc selectMessagesByHistoryQueryWithLimit*(
): DatabaseResult[
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
] =
# Must first get the message timestamp before paginating by time
let newCursor =
if cursor.isSome() and cursor.get() != EmptyWakuMessageHash:
let hash: WakuMessageHash = cursor.get()
var wakuMessage: WakuMessage
proc queryRowCallback(s: ptr sqlite3_stmt) =
wakuMessage = queryRowWakuMessageCallback(
s, contentTopicCol = 0, payloadCol = 1, versionCol = 2, senderTimestampCol = 3
)
let query = selectMessageByHashQuery()
let dbStmt = ?db.prepareStmt(query)
?dbStmt.execSelectMessageByHash(hash, queryRowCallback)
dbStmt.dispose()
let time: Timestamp = wakuMessage.timestamp
some((time, hash))
else:
none((Timestamp, WakuMessageHash))
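# Rationale (sketch): a v3 cursor is just a message hash, but rows are
# ordered by (timestamp, messageHash). The lookup above resolves the cursor
# hash to its message's timestamp so the WHERE clause can use the composite
# keyset comparison; an unknown hash leaves wakuMessage at its default value
# and thus yields timestamp 0.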
var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
@[]
proc queryRowCallback(s: ptr sqlite3_stmt) =
let
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
@ -467,13 +687,20 @@ proc selectMessagesByHistoryQueryWithLimit*(
let query = block:
let where = whereClause(
cursor, pubsubTopic, contentTopic, startTime, endTime, hashes, ascending
newCursor.isSome(),
pubsubTopic,
contentTopic,
startTime,
endTime,
hashes,
ascending,
)
selectMessagesWithLimitQuery(DbTable, where, limit, ascending)
let dbStmt = ?db.prepareSelectMessagesWithlimitStmt(query)
selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true)
let dbStmt = ?db.prepareStmt(query)
?dbStmt.execSelectMessagesWithLimitStmt(
cursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
)
dbStmt.dispose()

View File

@ -83,6 +83,32 @@ method getAllMessages*(
## Retrieve all messages from the store.
return s.db.selectAllMessages()
method getMessagesV2*(
s: SqliteDriver,
contentTopic = newSeq[ContentTopic](0),
pubsubTopic = none(PubsubTopic),
cursor = none(ArchiveCursor),
startTime = none(Timestamp),
endTime = none(Timestamp),
maxPageSize = DefaultPageSize,
ascendingOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
echo "here"
let cursor = cursor.map(toDbCursor)
let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
contentTopic,
pubsubTopic,
cursor,
startTime,
endTime,
limit = maxPageSize,
ascending = ascendingOrder,
)
return rowsRes
method getMessages*(
s: SqliteDriver,
contentTopic = newSeq[ContentTopic](0),
@ -94,9 +120,13 @@ method getMessages*(
maxPageSize = DefaultPageSize,
ascendingOrder = true,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
let cursor = cursor.map(toDbCursor)
let cursor =
if cursor.isSome():
some(cursor.get().hash)
else:
none(WakuMessageHash)
let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
let rowsRes = s.db.selectMessagesByStoreQueryWithLimit(
contentTopic,
pubsubTopic,
cursor,

View File

@ -11,6 +11,11 @@ import ../topics, ./message
type WakuMessageHash* = array[32, byte]
const EmptyWakuMessageHash*: WakuMessageHash = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
]
converter fromBytes*(array: openArray[byte]): WakuMessageHash =
var hash: WakuMessageHash
let copiedBytes = copyFrom(hash, array)

View File

@ -5,17 +5,7 @@ else:
import std/options, stew/results, chronicles, chronos, metrics, bearssl/rand
import
../node/peer_manager,
../utils/requests,
./protocol_metrics,
./common,
./rpc,
./rpc_codec
when defined(waku_exp_store_resume):
import std/[sequtils, times]
import ../waku_archive
import ../waku_core/message/digest
../node/peer_manager, ../utils/requests, ./protocol_metrics, ./common, ./rpc_codec
logScope:
topics = "waku store client"
@ -27,216 +17,48 @@ type WakuStoreClient* = ref object
peerManager: PeerManager
rng: ref rand.HmacDrbgContext
# TODO: Move outside of the client
when defined(waku_exp_store_resume):
store: ArchiveDriver
proc new*(
T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext
): T =
WakuStoreClient(peerManager: peerManager, rng: rng)
proc sendHistoryQueryRPC(
w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo
): Future[HistoryResult] {.async, gcsafe.} =
let connOpt = await w.peerManager.dialPeer(peer, WakuStoreCodec)
if connOpt.isNone():
waku_store_errors.inc(labelValues = [dialFailure])
return err(HistoryError(kind: HistoryErrorKind.PEER_DIAL_FAILURE, address: $peer))
proc sendStoreRequest(
self: WakuStoreClient, request: StoreQueryRequest, connection: Connection
): Future[StoreQueryResult] {.async, gcsafe.} =
var req = request
let connection = connOpt.get()
req.requestId = generateRequestId(self.rng)
let reqRpc = HistoryRPC(requestId: generateRequestId(w.rng), query: some(req.toRPC()))
await connection.writeLP(reqRpc.encode().buffer)
let writeRes = catch:
await connection.writeLP(req.encode().buffer)
if writeRes.isErr():
return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: writeRes.error.msg))
# TODO: I see a challenge here: if the store node uses a different MaxRPCSize, this read will fail.
# Need to find a workaround for this.
let buf = await connection.readLp(DefaultMaxRpcSize.int)
let respDecodeRes = HistoryRPC.decode(buf)
if respDecodeRes.isErr():
let readRes = catch:
await connection.readLp(DefaultMaxRpcSize.int)
let buf = readRes.valueOr:
return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: error.msg))
let res = StoreQueryResponse.decode(buf).valueOr:
waku_store_errors.inc(labelValues = [decodeRpcFailure])
return
err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure))
return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: decodeRpcFailure))
let respRpc = respDecodeRes.get()
if res.statusCode != uint32(StatusCode.SUCCESS):
waku_store_errors.inc(labelValues = [res.statusDesc])
return err(StoreError.new(res.statusCode, res.statusDesc))
# Disabled, for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0))
# TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK))
# and rework the protobuf parsing to return Option[T] when empty values are received
if respRpc.response.isNone():
waku_store_errors.inc(labelValues = [emptyRpcResponseFailure])
return err(
HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: emptyRpcResponseFailure)
)
let resp = respRpc.response.get()
return resp.toAPI()
return ok(res)
proc query*(
w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo
): Future[HistoryResult] {.async, gcsafe.} =
return await w.sendHistoryQueryRPC(req, peer)
self: WakuStoreClient, request: StoreQueryRequest, peer: RemotePeerInfo
): Future[StoreQueryResult] {.async, gcsafe.} =
if request.paginationCursor.isSome() and request.paginationCursor.get() == EmptyCursor:
return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: "invalid cursor"))
# TODO: Move outside of the client
when defined(waku_exp_store_resume):
## Resume store
let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr:
waku_store_errors.inc(labelValues = [dialFailure])
const StoreResumeTimeWindowOffset: Timestamp = getNanosecondTime(20)
## Adjust the time window with an offset of 20 seconds
return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer))
proc new*(
T: type WakuStoreClient,
peerManager: PeerManager,
rng: ref rand.HmacDrbgContext,
store: ArchiveDriver,
): T =
WakuStoreClient(peerManager: peerManager, rng: rng, store: store)
proc queryAll(
w: WakuStoreClient, query: HistoryQuery, peer: RemotePeerInfo
): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} =
## A thin wrapper for query. Sends the query to the given peer. When the query has a valid pagingInfo,
## it retrieves the historical messages in pages.
## Returns all the fetched messages; if an error occurs, returns an error string.
# Make a copy of the query
var req = query
var messageList: seq[WakuMessage] = @[]
while true:
let queryRes = await w.query(req, peer)
if queryRes.isErr():
return err($queryRes.error)
let response = queryRes.get()
messageList.add(response.messages)
# Check whether it is the last page
if response.cursor.isNone():
break
# Update paging cursor
req.cursor = response.cursor
return ok(messageList)
proc queryLoop(
w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo]
): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} =
## Loops through the peers candidate list in order and sends the query to each
##
## Once all responses have been received, the retrieved messages are consolidated into one deduplicated list.
## If no messages have been retrieved, the returned future will resolve into a result holding an empty seq.
let queryFuturesList = peers.mapIt(w.queryAll(req, it))
await allFutures(queryFuturesList)
let messagesList = queryFuturesList
.map(
proc(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] =
try:
# fut.read() can raise a CatchableError
# These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
if not fut.completed() or fut.read().isErr():
return @[]
fut.read().value
except CatchableError:
return @[]
)
.concat()
.deduplicate()
return ok(messagesList)
proc put(
store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage
): Result[void, string] =
let
digest = waku_archive.computeDigest(message)
messageHash = computeMessageHash(pubsubTopic, message)
receivedTime =
if message.timestamp > 0:
message.timestamp
else:
getNanosecondTime(getTime().toUnixFloat())
store.put(pubsubTopic, message, digest, messageHash, receivedTime)
proc resume*(
w: WakuStoreClient,
peerList = none(seq[RemotePeerInfo]),
pageSize = DefaultPageSize,
pubsubTopic = DefaultPubsubTopic,
): Future[WakuStoreResult[uint64]] {.async, gcsafe.} =
## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online
## messages are stored in the store node's messages field and in the message db
## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
## an offset of 20 seconds is added to the time window to account for node asynchrony
## peerList indicates the list of peers to query from.
## The history is fetched from all available peers in this list and then consolidated into one deduplicated list.
## Such candidates should be found through a discovery method (to be developed).
## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from.
## The history gets fetched successfully if the dialed peer has been online during the queried time window.
## the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string
# If store has not been provided, don't even try
if w.store.isNil():
return err("store not provided (nil)")
# NOTE: Original implementation is based on the message's sender timestamp. At the moment
# of writing, the sqlite store implementation returns the last message's receiver
# timestamp.
# lastSeenTime = lastSeenItem.get().msg.timestamp
let
lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0))
now = getNanosecondTime(getTime().toUnixFloat())
debug "resuming with offline time window",
lastSeenTime = lastSeenTime, currentTime = now
let
queryEndTime = now + StoreResumeTimeWindowOffset
queryStartTime = max(lastSeenTime - StoreResumeTimeWindowOffset, 0)
let req = HistoryQuery(
pubsubTopic: some(pubsubTopic),
startTime: some(queryStartTime),
endTime: some(queryEndTime),
pageSize: uint64(pageSize),
direction: default(),
)
var res: WakuStoreResult[seq[WakuMessage]]
if peerList.isSome():
debug "trying the candidate list to fetch the history"
res = await w.queryLoop(req, peerList.get())
else:
debug "no candidate list is provided, selecting a random peer"
# if no peerList is set then query from one of the peers stored in the peer manager
let peerOpt = w.peerManager.selectPeer(WakuStoreCodec)
if peerOpt.isNone():
warn "no suitable remote peers"
waku_store_errors.inc(labelValues = [peerNotFoundFailure])
return err("no suitable remote peers")
debug "a peer is selected from peer manager"
res = await w.queryAll(req, peerOpt.get())
if res.isErr():
debug "failed to resume the history"
return err("failed to resume the history")
# Save the retrieved messages in the store
var added: uint = 0
for msg in res.get():
let putStoreRes = w.store.put(pubsubTopic, msg)
if putStoreRes.isErr():
continue
added.inc()
return ok(added)
return await self.sendStoreRequest(request, connection)
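# Hedged usage sketch (peer discovery elided; somePeer is assumed):
#   let client = WakuStoreClient.new(peerManager, rng)
#   let req = StoreQueryRequest(
#     includeData: true, contentTopics: @["/app/1/chat/proto"]
#   )
#   let res = await client.query(req, somePeer)
#   if res.isOk():
#     for kv in res.get().messages:
#       echo kv.message.timestamp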

View File

@ -3,57 +3,62 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[options, sequtils], stew/results, stew/byteutils, nimcrypto/sha2
import std/[options], stew/results
import ../waku_core, ../common/paging
const
WakuStoreCodec* = "/vac/waku/store/2.0.0-beta4"
WakuStoreCodec* = "/vac/waku/store-query/3.0.0"
DefaultPageSize*: uint64 = 20
MaxPageSize*: uint64 = 100
EmptyCursor*: WakuMessageHash = EmptyWakuMessageHash
type WakuStoreResult*[T] = Result[T, string]
## Waku message digest
type MessageDigest* = MDigest[256]
proc computeDigest*(msg: WakuMessage): MessageDigest =
var ctx: sha256
ctx.init()
defer:
ctx.clear()
ctx.update(msg.contentTopic.toBytes())
ctx.update(msg.payload)
# Computes the hash
return ctx.finish()
## Public API types
type
HistoryCursor* = object
pubsubTopic*: PubsubTopic
senderTime*: Timestamp
storeTime*: Timestamp
digest*: MessageDigest
StoreQueryRequest* = object
requestId*: string
includeData*: bool
HistoryQuery* = object
pubsubTopic*: Option[PubsubTopic]
contentTopics*: seq[ContentTopic]
cursor*: Option[HistoryCursor]
startTime*: Option[Timestamp]
endTime*: Option[Timestamp]
pageSize*: uint64
direction*: PagingDirection
HistoryResponse* = object
messages*: seq[WakuMessage]
cursor*: Option[HistoryCursor]
messageHashes*: seq[WakuMessageHash]
HistoryErrorKind* {.pure.} = enum
paginationCursor*: Option[WakuMessageHash]
paginationForward*: PagingDirection
paginationLimit*: Option[uint64]
WakuMessageKeyValue* = object
messageHash*: WakuMessageHash
message*: WakuMessage
StoreQueryResponse* = object
requestId*: string
statusCode*: uint32
statusDesc*: string
messages*: seq[WakuMessageKeyValue]
paginationCursor*: Option[WakuMessageHash]
StatusCode* {.pure.} = enum
UNKNOWN = uint32(000)
SUCCESS = uint32(200)
BAD_RESPONSE = uint32(300)
BAD_REQUEST = uint32(400)
TOO_MANY_REQUESTS = uint32(429)
SERVICE_UNAVAILABLE = uint32(503)
PEER_DIAL_FAILURE = uint32(504)
ErrorCode* {.pure.} = enum
UNKNOWN = uint32(000)
BAD_RESPONSE = uint32(300)
BAD_REQUEST = uint32(400)
@ -61,49 +66,55 @@ type
SERVICE_UNAVAILABLE = uint32(503)
PEER_DIAL_FAILURE = uint32(504)
HistoryError* = object
case kind*: HistoryErrorKind
of PEER_DIAL_FAILURE:
StoreError* = object
case kind*: ErrorCode
of ErrorCode.PEER_DIAL_FAILURE:
address*: string
of BAD_RESPONSE, BAD_REQUEST:
of ErrorCode.BAD_RESPONSE, ErrorCode.BAD_REQUEST:
cause*: string
else:
discard
HistoryResult* = Result[HistoryResponse, HistoryError]
StoreQueryResult* = Result[StoreQueryResponse, StoreError]
proc into*(errCode: ErrorCode): StatusCode =
StatusCode(uint32(errCode))
proc new*(T: type StoreError, code: uint32, desc: string): T =
let kind = ErrorCode.parse(code)
proc parse*(T: type HistoryErrorKind, kind: uint32): T =
case kind
of 000, 200, 300, 400, 429, 503:
HistoryErrorKind(kind)
of ErrorCode.BAD_RESPONSE:
return StoreError(kind: kind, cause: desc)
of ErrorCode.BAD_REQUEST:
return StoreError(kind: kind, cause: desc)
of ErrorCode.TOO_MANY_REQUESTS:
return StoreError(kind: kind)
of ErrorCode.SERVICE_UNAVAILABLE:
return StoreError(kind: kind)
of ErrorCode.PEER_DIAL_FAILURE:
return StoreError(kind: kind, address: desc)
of ErrorCode.UNKNOWN:
return StoreError(kind: kind)
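# Hedged examples of the mapping:
#   doAssert StoreError.new(429, "rate limited").kind == ErrorCode.TOO_MANY_REQUESTS
#   doAssert $StoreError.new(504, "/ip4/1.2.3.4/tcp/60000") ==
#     "PEER_DIAL_FAILURE: /ip4/1.2.3.4/tcp/60000"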
proc parse*(T: type ErrorCode, kind: uint32): T =
case kind
of 000, 300, 400, 429, 503, 504:
ErrorCode(kind)
else:
HistoryErrorKind.UNKNOWN
ErrorCode.UNKNOWN
proc `$`*(err: HistoryError): string =
proc `$`*(err: StoreError): string =
case err.kind
of HistoryErrorKind.PEER_DIAL_FAILURE:
of ErrorCode.PEER_DIAL_FAILURE:
"PEER_DIAL_FAILURE: " & err.address
of HistoryErrorKind.BAD_RESPONSE:
of ErrorCode.BAD_RESPONSE:
"BAD_RESPONSE: " & err.cause
of HistoryErrorKind.BAD_REQUEST:
of ErrorCode.BAD_REQUEST:
"BAD_REQUEST: " & err.cause
of HistoryErrorKind.TOO_MANY_REQUESTS:
of ErrorCode.TOO_MANY_REQUESTS:
"TOO_MANY_REQUESTS"
of HistoryErrorKind.SERVICE_UNAVAILABLE:
of ErrorCode.SERVICE_UNAVAILABLE:
"SERVICE_UNAVAILABLE"
of HistoryErrorKind.UNKNOWN:
of ErrorCode.UNKNOWN:
"UNKNOWN"
proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] =
if self.pubsubTopic.len == 0:
return err(HistoryError(kind: BAD_REQUEST, cause: "empty pubsubTopic"))
if self.senderTime == 0:
return err(HistoryError(kind: BAD_REQUEST, cause: "invalid senderTime"))
if self.storeTime == 0:
return err(HistoryError(kind: BAD_REQUEST, cause: "invalid storeTime"))
if self.digest.data.all(
proc(x: byte): bool =
x == 0
):
return err(HistoryError(kind: BAD_REQUEST, cause: "empty digest"))
return ok()

View File

@ -21,7 +21,6 @@ import
../waku_core,
../node/peer_manager,
./common,
./rpc,
./rpc_codec,
./protocol_metrics,
../common/ratelimit,
@ -33,105 +32,109 @@ logScope:
const MaxMessageTimestampVariance* = getNanoSecondTime(20)
# 20 seconds maximum allowable sender timestamp "drift"
type HistoryQueryHandler* =
proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.}
type StoreQueryRequestHandler* =
proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.}
type WakuStore* = ref object of LPProtocol
peerManager: PeerManager
rng: ref rand.HmacDrbgContext
queryHandler*: HistoryQueryHandler
requestHandler*: StoreQueryRequestHandler
requestRateLimiter*: Option[TokenBucket]
## Protocol
proc initProtocolHandler(ws: WakuStore) =
proc handler(conn: Connection, proto: string) {.async.} =
let buf = await conn.readLp(DefaultMaxRpcSize.int)
proc handleQueryRequest*(
self: WakuStore, requestor: PeerId, raw_request: seq[byte]
): Future[seq[byte]] {.async.} =
var res = StoreQueryResponse()
let decodeRes = HistoryRPC.decode(buf)
if decodeRes.isErr():
error "failed to decode rpc", peerId = $conn.peerId
waku_store_errors.inc(labelValues = [decodeRpcFailure])
# TODO: Return (BAD_REQUEST, cause: "decode rpc failed")
let req = StoreQueryRequest.decode(raw_request).valueOr:
error "failed to decode rpc", peerId = requestor
waku_store_errors.inc(labelValues = [decodeRpcFailure])
res.statusCode = uint32(ErrorCode.BAD_REQUEST)
res.statusDesc = "decode rpc failed"
return res.encode().buffer
let requestId = req.requestId
if self.requestRateLimiter.isSome() and not self.requestRateLimiter.get().tryConsume(
1
):
debug "store query request rejected due rate limit exceeded",
peerId = $requestor, requestId = requestId
res.statusCode = uint32(ErrorCode.TOO_MANY_REQUESTS)
res.statusDesc = $ErrorCode.TOO_MANY_REQUESTS
waku_service_requests_rejected.inc(labelValues = ["Store"])
return res.encode().buffer
waku_service_requests.inc(labelValues = ["Store"])
info "received store query request",
peerId = requestor, requestId = requestId, request = req
waku_store_queries.inc()
let queryResult = await self.requestHandler(req)
res = queryResult.valueOr:
error "store query failed",
peerId = requestor, requestId = requestId, error = queryResult.error
res.statusCode = uint32(queryResult.error.kind)
res.statusDesc = $queryResult.error
return res.encode().buffer
res.requestId = requestId
res.statusCode = 200
info "sending store query response",
peerId = requestor, requestId = requestId, messages = res.messages.len
return res.encode().buffer
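# Hedged round-trip sketch (e.g. from a test; `store` is a mounted WakuStore
# whose requestHandler succeeds, `peerId` is any PeerId):
#   let req = StoreQueryRequest(requestId: "r1", includeData: false)
#   let rawResp = await store.handleQueryRequest(peerId, req.encode().buffer)
#   let resp = StoreQueryResponse.decode(rawResp).get()
#   doAssert resp.requestId == "r1" and resp.statusCode == 200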
proc initProtocolHandler(self: WakuStore) =
proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
let readRes = catch:
await conn.readLp(DefaultMaxRpcSize.int)
let reqBuf = readRes.valueOr:
error "Connection read error", error = error.msg
return
let reqRpc = decodeRes.value
let resBuf = await self.handleQueryRequest(conn.peerId, reqBuf)
if reqRpc.query.isNone():
error "empty query rpc", peerId = $conn.peerId, requestId = reqRpc.requestId
waku_store_errors.inc(labelValues = [emptyRpcQueryFailure])
# TODO: Return (BAD_REQUEST, cause: "empty query")
let writeRes = catch:
await conn.writeLp(resBuf)
if writeRes.isErr():
error "Connection write error", error = writeRes.error.msg
return
if ws.requestRateLimiter.isSome() and not ws.requestRateLimiter.get().tryConsume(1):
trace "store query request rejected due rate limit exceeded",
peerId = $conn.peerId, requestId = reqRpc.requestId
let error = HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS).toRPC()
let response = HistoryResponseRPC(error: error)
let rpc = HistoryRPC(requestId: reqRpc.requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
waku_service_requests_rejected.inc(labelValues = ["Store"])
return
waku_service_requests.inc(labelValues = ["Store"])
let
requestId = reqRpc.requestId
request = reqRpc.query.get().toAPI()
info "received history query",
peerId = conn.peerId, requestId = requestId, query = request
waku_store_queries.inc()
var responseRes: HistoryResult
try:
responseRes = await ws.queryHandler(request)
except Exception:
error "history query failed",
peerId = $conn.peerId, requestId = requestId, error = getCurrentExceptionMsg()
let error = HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC()
let response = HistoryResponseRPC(error: error)
let rpc = HistoryRPC(requestId: requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
return
if responseRes.isErr():
error "history query failed",
peerId = $conn.peerId, requestId = requestId, error = responseRes.error
let response = responseRes.toRPC()
let rpc = HistoryRPC(requestId: requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
return
let response = responseRes.toRPC()
info "sending history response",
peerId = conn.peerId, requestId = requestId, messages = response.messages.len
let rpc = HistoryRPC(requestId: requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
ws.handler = handler
ws.codec = WakuStoreCodec
self.handler = handler
self.codec = WakuStoreCodec
proc new*(
T: type WakuStore,
peerManager: PeerManager,
rng: ref rand.HmacDrbgContext,
queryHandler: HistoryQueryHandler,
requestHandler: StoreQueryRequestHandler,
rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
): T =
# Raise a defect if history query handler is nil
if queryHandler.isNil():
if requestHandler.isNil(): # TODO use an Option instead ???
raise newException(NilAccessDefect, "history query handler is nil")
let ws = WakuStore(
let store = WakuStore(
rng: rng,
peerManager: peerManager,
queryHandler: queryHandler,
requestHandler: requestHandler,
requestRateLimiter: newTokenBucket(rateLimitSetting),
)
ws.initProtocolHandler()
ws
store.initProtocolHandler()
return store
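
For orientation, a minimal sketch of wiring up the new v3 request handler follows; `peerManager`, `rng` and `archive` are assumed to exist in the caller's scope and are not part of this diff, and `archive.findMessages` is a hypothetical backend helper:

# Hypothetical glue code: delegate store v3 queries to an archive backend.
proc myRequestHandler(req: StoreQueryRequest): Future[StoreQueryResult] {.async.} =
  # `archive.findMessages` is an assumed helper returning a StoreQueryResult.
  return await archive.findMessages(req)

let store = WakuStore.new(peerManager, rng, myRequestHandler)

Note that `new` raises a NilAccessDefect when the handler is nil, so the handler must be supplied up front.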

View File

@ -3,256 +3,208 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/options, nimcrypto/hash
import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc
import std/options, stew/arrayops, nimcrypto/hash
import ../common/[protobuf, paging], ../waku_core, ./common
const DefaultMaxRpcSize* = -1
## Pagination
### Request ###
proc encode*(index: PagingIndexRPC): ProtoBuffer =
## Encode an Index object into a ProtoBuffer
## returns the resultant ProtoBuffer
proc encode*(req: StoreQueryRequest): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(1, index.digest.data)
pb.write3(2, zint64(index.receiverTime))
pb.write3(3, zint64(index.senderTime))
pb.write3(4, index.pubsubTopic)
pb.finish3()
pb.write3(1, req.requestId)
pb.write3(2, req.includeData)
pb
pb.write3(3, req.pubsubTopic)
proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] =
## creates and returns an Index object out of buffer
var rpc = PagingIndexRPC()
let pb = initProtoBuffer(buffer)
for contentTopic in req.contentTopics:
pb.write3(4, contentTopic)
var data: seq[byte]
if not ?pb.getField(1, data):
return err(ProtobufError.missingRequiredField("digest"))
else:
var digest = MessageDigest()
for count, b in data:
digest.data[count] = b
rpc.digest = digest
var receiverTime: zint64
if not ?pb.getField(2, receiverTime):
return err(ProtobufError.missingRequiredField("receiver_time"))
else:
rpc.receiverTime = int64(receiverTime)
var senderTime: zint64
if not ?pb.getField(3, senderTime):
return err(ProtobufError.missingRequiredField("sender_time"))
else:
rpc.senderTime = int64(senderTime)
var pubsubTopic: string
if not ?pb.getField(4, pubsubTopic):
return err(ProtobufError.missingRequiredField("pubsub_topic"))
else:
rpc.pubsubTopic = pubsubTopic
ok(rpc)
proc encode*(rpc: PagingInfoRPC): ProtoBuffer =
## Encodes a PagingInfo object into a ProtoBuffer
## returns the resultant ProtoBuffer
var pb = initProtoBuffer()
pb.write3(1, rpc.pageSize)
pb.write3(2, rpc.cursor.map(encode))
pb.write3(
3,
rpc.direction.map(
proc(d: PagingDirection): uint32 =
uint32(ord(d))
),
)
pb.finish3()
pb
proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] =
## creates and returns a PagingInfo object out of buffer
var rpc = PagingInfoRPC()
let pb = initProtoBuffer(buffer)
var pageSize: uint64
if not ?pb.getField(1, pageSize):
rpc.pageSize = none(uint64)
else:
rpc.pageSize = some(pageSize)
var cursorBuffer: seq[byte]
if not ?pb.getField(2, cursorBuffer):
rpc.cursor = none(PagingIndexRPC)
else:
let cursor = ?PagingIndexRPC.decode(cursorBuffer)
rpc.cursor = some(cursor)
var direction: uint32
if not ?pb.getField(3, direction):
rpc.direction = none(PagingDirection)
else:
rpc.direction = some(PagingDirection(direction))
ok(rpc)
## Wire protocol
proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(1, rpc.contentTopic)
pb.finish3()
pb
proc decode*(T: type HistoryContentFilterRPC, buffer: seq[byte]): ProtobufResult[T] =
let pb = initProtoBuffer(buffer)
var contentTopic: ContentTopic
if not ?pb.getField(1, contentTopic):
return err(ProtobufError.missingRequiredField("content_topic"))
ok(HistoryContentFilterRPC(contentTopic: contentTopic))
proc encode*(rpc: HistoryQueryRPC): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(2, rpc.pubsubTopic)
for filter in rpc.contentFilters:
pb.write3(3, filter.encode())
pb.write3(4, rpc.pagingInfo.map(encode))
pb.write3(
5,
rpc.startTime.map(
req.startTime.map(
proc(time: int64): zint64 =
zint64(time)
),
)
pb.write3(
6,
rpc.endTime.map(
req.endTime.map(
proc(time: int64): zint64 =
zint64(time)
),
)
for hash in req.messageHashes:
pb.write3(7, hash)
pb.write3(8, req.paginationCursor)
pb.write3(9, uint32(req.paginationForward))
pb.write3(10, req.paginationLimit)
pb.finish3()
pb
return pb
proc decode*(T: type HistoryQueryRPC, buffer: seq[byte]): ProtobufResult[T] =
var rpc = HistoryQueryRPC()
proc decode*(
T: type StoreQueryRequest, buffer: seq[byte]
): ProtobufResult[StoreQueryRequest] =
var req = StoreQueryRequest()
let pb = initProtoBuffer(buffer)
if not ?pb.getField(1, req.requestId):
return err(ProtobufError.missingRequiredField("request_id"))
var inclData: uint
if not ?pb.getField(2, inclData):
req.includeData = false
else:
req.includeData = inclData == 1
var pubsubTopic: string
if not ?pb.getField(2, pubsubTopic):
rpc.pubsubTopic = none(string)
if not ?pb.getField(3, pubsubTopic):
req.pubsubTopic = none(string)
else:
rpc.pubsubTopic = some(pubsubTopic)
req.pubsubTopic = some(pubsubTopic)
var buffs: seq[seq[byte]]
if not ?pb.getRepeatedField(3, buffs):
rpc.contentFilters = @[]
var topics: seq[string]
if not ?pb.getRepeatedField(4, topics):
req.contentTopics = @[]
else:
for pb in buffs:
let filter = ?HistoryContentFilterRPC.decode(pb)
rpc.contentFilters.add(filter)
req.contentTopics = topics
var pagingInfoBuffer: seq[byte]
if not ?pb.getField(4, pagingInfoBuffer):
rpc.pagingInfo = none(PagingInfoRPC)
var start: zint64
if not ?pb.getField(5, start):
req.startTime = none(Timestamp)
else:
let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer)
rpc.pagingInfo = some(pagingInfo)
var startTime: zint64
if not ?pb.getField(5, startTime):
rpc.startTime = none(int64)
else:
rpc.startTime = some(int64(startTime))
req.startTime = some(Timestamp(int64(start)))
var endTime: zint64
if not ?pb.getField(6, endTime):
rpc.endTime = none(int64)
req.endTime = none(Timestamp)
else:
rpc.endTime = some(int64(endTime))
req.endTime = some(Timestamp(int64(endTime)))
ok(rpc)
var buffer: seq[seq[byte]]
if not ?pb.getRepeatedField(7, buffer):
req.messageHashes = @[]
else:
req.messageHashes = newSeqOfCap[WakuMessageHash](buffer.len)
for buf in buffer:
var hash: WakuMessageHash
discard copyFrom[byte](hash, buf)
req.messageHashes.add(hash)
proc encode*(response: HistoryResponseRPC): ProtoBuffer =
var cursor: seq[byte]
if not ?pb.getField(8, cursor):
req.paginationCursor = none(WakuMessageHash)
else:
var hash: WakuMessageHash
discard copyFrom[byte](hash, cursor)
req.paginationCursor = some(hash)
var paging: uint32
if not ?pb.getField(9, paging):
req.paginationForward = PagingDirection.default()
else:
req.paginationForward = PagingDirection(paging)
var limit: uint64
if not ?pb.getField(10, limit):
req.paginationLimit = none(uint64)
else:
req.paginationLimit = some(limit)
return ok(req)
### Response ###
proc encode*(keyValue: WakuMessageKeyValue): ProtoBuffer =
var pb = initProtoBuffer()
for rpc in response.messages:
pb.write3(2, rpc.encode())
pb.write3(1, keyValue.messageHash)
pb.write3(2, keyValue.message.encode())
pb.write3(3, response.pagingInfo.map(encode))
pb.write3(4, uint32(ord(response.error)))
pb.finish3()
pb
return pb
proc decode*(T: type HistoryResponseRPC, buffer: seq[byte]): ProtobufResult[T] =
var rpc = HistoryResponseRPC()
let pb = initProtoBuffer(buffer)
var messages: seq[seq[byte]]
if ?pb.getRepeatedField(2, messages):
for pb in messages:
let message = ?WakuMessage.decode(pb)
rpc.messages.add(message)
else:
rpc.messages = @[]
var pagingInfoBuffer: seq[byte]
if ?pb.getField(3, pagingInfoBuffer):
let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer)
rpc.pagingInfo = some(pagingInfo)
else:
rpc.pagingInfo = none(PagingInfoRPC)
var error: uint32
if not ?pb.getField(4, error):
return err(ProtobufError.missingRequiredField("error"))
else:
rpc.error = HistoryResponseErrorRPC.parse(error)
ok(rpc)
proc encode*(rpc: HistoryRPC): ProtoBuffer =
proc encode*(res: StoreQueryResponse): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(1, rpc.requestId)
pb.write3(2, rpc.query.map(encode))
pb.write3(3, rpc.response.map(encode))
pb.write3(1, res.requestId)
pb.write3(2, res.statusCode)
pb.write3(3, res.statusDesc)
for msg in res.messages:
pb.write3(4, msg.encode())
pb.write3(5, res.paginationCursor)
pb.finish3()
pb
return pb
proc decode*(T: type HistoryRPC, buffer: seq[byte]): ProtobufResult[T] =
var rpc = HistoryRPC()
proc decode*(
T: type WakuMessageKeyValue, buffer: seq[byte]
): ProtobufResult[WakuMessageKeyValue] =
var keyValue = WakuMessageKeyValue()
let pb = initProtoBuffer(buffer)
if not ?pb.getField(1, rpc.requestId):
var buf: seq[byte]
if not ?pb.getField(1, buf):
return err(ProtobufError.missingRequiredField("message_hash"))
else:
var hash: WakuMessageHash
discard copyFrom[byte](hash, buf)
keyValue.messageHash = hash
var proto: ProtoBuffer
if not ?pb.getField(2, proto):
return err(ProtobufError.missingRequiredField("message"))
else:
keyValue.message = ?WakuMessage.decode(proto.buffer)
return ok(keyValue)
proc decode*(
T: type StoreQueryResponse, buffer: seq[byte]
): ProtobufResult[StoreQueryResponse] =
var res = StoreQueryResponse()
let pb = initProtoBuffer(buffer)
if not ?pb.getField(1, res.requestId):
return err(ProtobufError.missingRequiredField("request_id"))
var queryBuffer: seq[byte]
if not ?pb.getField(2, queryBuffer):
rpc.query = none(HistoryQueryRPC)
var code: uint32
if not ?pb.getField(2, code):
return err(ProtobufError.missingRequiredField("status_code"))
else:
let query = ?HistoryQueryRPC.decode(queryBuffer)
rpc.query = some(query)
res.statusCode = code
var responseBuffer: seq[byte]
if not ?pb.getField(3, responseBuffer):
rpc.response = none(HistoryResponseRPC)
var desc: string
if not ?pb.getField(3, desc):
return err(ProtobufError.missingRequiredField("status_desc"))
else:
let response = ?HistoryResponseRPC.decode(responseBuffer)
rpc.response = some(response)
res.statusDesc = desc
ok(rpc)
var buffer: seq[seq[byte]]
if not ?pb.getRepeatedField(4, buffer):
res.messages = @[]
else:
res.messages = newSeqOfCap[WakuMessageKeyValue](buffer.len)
for buf in buffer:
let msg = ?WakuMessageKeyValue.decode(buf)
res.messages.add(msg)
var cursor: seq[byte]
if not ?pb.getField(5, cursor):
res.paginationCursor = none(WakuMessageHash)
else:
var hash: WakuMessageHash
discard copyFrom[byte](hash, cursor)
res.paginationCursor = some(hash)
return ok(res)
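
Since every wire field except `request_id` is optional, an encode/decode round trip is a quick sanity check for this codec. A minimal sketch with illustrative values (`PagingDirection` comes from `../common/paging`):

let req = StoreQueryRequest(
  requestId: "1",
  includeData: true,
  contentTopics: @["/waku/2/default-content/proto"],
  paginationForward: PagingDirection.FORWARD,
)

# Decoding what we just encoded should preserve the required field.
let decoded = StoreQueryRequest.decode(req.encode().buffer)
assert decoded.isOk() and decoded.get().requestId == "1"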

View File

@ -13,19 +13,25 @@
## stored by that local store node.
##
import stew/results, chronos, chronicles
import stew/results, chronos
import ./protocol, ./common
proc handleSelfStoreRequest*(
self: WakuStore, histQuery: HistoryQuery
): Future[WakuStoreResult[HistoryResponse]] {.async.} =
self: WakuStore, req: StoreQueryRequest
): Future[WakuStoreResult[StoreQueryResponse]] {.async.} =
## Handles the store requests made by the node to itself.
## Normally used in REST-store requests.
try:
let resp: HistoryResponse = (await self.queryHandler(histQuery)).valueOr:
return err("error in handleSelfStoreRequest: " & $error)
let handlerResult = catch:
await self.requestHandler(req)
return WakuStoreResult[HistoryResponse].ok(resp)
except Exception:
return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg())
let resResult =
if handlerResult.isErr():
return err("exception in handleSelfStoreRequest: " & handlerResult.error.msg)
else:
handlerResult.get()
let res = resResult.valueOr:
return err("error in handleSelfStoreRequest: " & $error)
return ok(res)
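
A hedged usage sketch from the node's own side, e.g. inside a REST handler; `store` is the already-mounted WakuStore instance and the request values are placeholders:

let req = StoreQueryRequest(requestId: "self-query", includeData: true)

let res = await store.handleSelfStoreRequest(req)
if res.isErr():
  echo "self store query failed: ", res.error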

View File

@ -0,0 +1,3 @@
import ./waku_store_legacy/common, ./waku_store_legacy/protocol
export common, protocol

View File

@ -0,0 +1,242 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/options, stew/results, chronicles, chronos, metrics, bearssl/rand
import
../node/peer_manager,
../utils/requests,
./protocol_metrics,
./common,
./rpc,
./rpc_codec
when defined(waku_exp_store_resume):
import std/[sequtils, times]
import ../waku_archive
import ../waku_core/message/digest
logScope:
topics = "waku legacy store client"
const DefaultPageSize*: uint = 20
# A recommended default number of waku messages per page
type WakuStoreClient* = ref object
peerManager: PeerManager
rng: ref rand.HmacDrbgContext
# TODO: Move outside of the client
when defined(waku_exp_store_resume):
store: ArchiveDriver
proc new*(
T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext
): T =
WakuStoreClient(peerManager: peerManager, rng: rng)
proc sendHistoryQueryRPC(
w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo
): Future[HistoryResult] {.async, gcsafe.} =
let connOpt = await w.peerManager.dialPeer(peer, WakuStoreCodec)
if connOpt.isNone():
waku_legacy_store_errors.inc(labelValues = [dialFailure])
return err(HistoryError(kind: HistoryErrorKind.PEER_DIAL_FAILURE, address: $peer))
let connection = connOpt.get()
let reqRpc = HistoryRPC(requestId: generateRequestId(w.rng), query: some(req.toRPC()))
await connection.writeLP(reqRpc.encode().buffer)
# TODO: I see a challenge here: if the storeNode uses a different MaxRpcSize, this read will fail.
# Need to find a workaround for this.
let buf = await connection.readLp(DefaultMaxRpcSize.int)
let respDecodeRes = HistoryRPC.decode(buf)
if respDecodeRes.isErr():
waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
return
err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure))
let respRpc = respDecodeRes.get()
# Disabled, for now, since the default response is a possible case (no messages, pageSize = 0, error = NONE(0))
# TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK))
# and rework the protobuf parsing to return Option[T] when empty values are received
if respRpc.response.isNone():
waku_legacy_store_errors.inc(labelValues = [emptyRpcResponseFailure])
return err(
HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: emptyRpcResponseFailure)
)
let resp = respRpc.response.get()
return resp.toAPI()
proc query*(
w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo
): Future[HistoryResult] {.async, gcsafe.} =
return await w.sendHistoryQueryRPC(req, peer)
# TODO: Move outside of the client
when defined(waku_exp_store_resume):
## Resume store
const StoreResumeTimeWindowOffset: Timestamp = getNanosecondTime(20)
## Adjust the time window with an offset of 20 seconds
proc new*(
T: type WakuStoreClient,
peerManager: PeerManager,
rng: ref rand.HmacDrbgContext,
store: ArchiveDriver,
): T =
WakuStoreClient(peerManager: peerManager, rng: rng, store: store)
proc queryAll(
w: WakuStoreClient, query: HistoryQuery, peer: RemotePeerInfo
): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} =
## A thin wrapper for query. Sends the query to the given peer. When the query has a valid pagingInfo,
## it retrieves the historical messages in pages.
## Returns all the fetched messages; if an error occurs, returns an error string.
# Make a copy of the query
var req = query
var messageList: seq[WakuMessage] = @[]
while true:
let queryRes = await w.query(req, peer)
if queryRes.isErr():
return err($queryRes.error)
let response = queryRes.get()
messageList.add(response.messages)
# Check whether it is the last page
if response.cursor.isNone():
break
# Update paging cursor
req.cursor = response.cursor
return ok(messageList)
proc queryLoop(
w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo]
): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} =
## Loops through the candidate peer list in order and sends the query to each peer.
##
## Once all responses have been received, the retrieved messages are consolidated into one deduplicated list.
## If no messages have been retrieved, the returned future will resolve into a result holding an empty seq.
let queryFuturesList = peers.mapIt(w.queryAll(req, it))
await allFutures(queryFuturesList)
let messagesList = queryFuturesList
.map(
proc(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] =
try:
# fut.read() can raise a CatchableError
# These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
if not fut.completed() or fut.read().isErr():
return @[]
fut.read().value
except CatchableError:
return @[]
)
.concat()
.deduplicate()
return ok(messagesList)
proc put(
store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage
): Result[void, string] =
let
digest = waku_archive.computeDigest(message)
messageHash = computeMessageHash(pubsubTopic, message)
receivedTime =
if message.timestamp > 0:
message.timestamp
else:
getNanosecondTime(getTime().toUnixFloat())
store.put(pubsubTopic, message, digest, messageHash, receivedTime)
proc resume*(
w: WakuStoreClient,
peerList = none(seq[RemotePeerInfo]),
pageSize = DefaultPageSize,
pubsubTopic = DefaultPubsubTopic,
): Future[WakuStoreResult[uint64]] {.async, gcsafe.} =
## The resume proc retrieves the history of Waku messages published on the default Waku pubsub topic since the last time the Waku store node was online.
## Messages are stored in the store node's messages field and in the message db.
## The offline time window is measured as the difference between the current time and the timestamp of the most recent persisted Waku message.
## An offset of 20 seconds is added to the time window to account for node asynchrony.
## peerList indicates the list of peers to query from.
## The history is fetched from all available peers in this list and then consolidated into one deduplicated list.
## Such candidate peers should be found through a discovery method (to be developed).
## If no peerList is passed, one of the peers in the underlying peer manager of the store protocol is picked randomly to fetch the history from.
## The history gets fetched successfully if the dialed peer has been online during the queried time window.
## The resume proc returns the number of retrieved messages if no error occurs; otherwise it returns the error string.
# If store has not been provided, don't even try
if w.store.isNil():
return err("store not provided (nil)")
# NOTE: Original implementation is based on the message's sender timestamp. At the moment
# of writing, the sqlite store implementation returns the last message's receiver
# timestamp.
# lastSeenTime = lastSeenItem.get().msg.timestamp
let
lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0))
now = getNanosecondTime(getTime().toUnixFloat())
debug "resuming with offline time window",
lastSeenTime = lastSeenTime, currentTime = now
let
queryEndTime = now + StoreResumeTimeWindowOffset
queryStartTime = max(lastSeenTime - StoreResumeTimeWindowOffset, 0)
let req = HistoryQuery(
pubsubTopic: some(pubsubTopic),
startTime: some(queryStartTime),
endTime: some(queryEndTime),
pageSize: uint64(pageSize),
direction: PagingDirection.default(),
)
var res: WakuStoreResult[seq[WakuMessage]]
if peerList.isSome():
debug "trying the candidate list to fetch the history"
res = await w.queryLoop(req, peerList.get())
else:
debug "no candidate list is provided, selecting a random peer"
# if no peerList is set then query from one of the peers stored in the peer manager
let peerOpt = w.peerManager.selectPeer(WakuStoreCodec)
if peerOpt.isNone():
warn "no suitable remote peers"
waku_legacy_store_errors.inc(labelValues = [peerNotFoundFailure])
return err("no suitable remote peers")
debug "a peer is selected from peer manager"
res = await w.queryAll(req, peerOpt.get())
if res.isErr():
debug "failed to resume the history"
return err("failed to resume the history")
# Save the retrieved messages in the store
var added: uint = 0
for msg in res.get():
let putStoreRes = w.store.put(pubsubTopic, msg)
if putStoreRes.isErr():
continue
added.inc()
return ok(added)
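
For comparison with the v3 path, a sketch of a one-shot legacy query; `client` is a WakuStoreClient and `peer` a RemotePeerInfo obtained elsewhere (e.g. via discovery), both assumed here:

let query = HistoryQuery(
  pubsubTopic: some(DefaultPubsubTopic),
  contentTopics: @["/waku/2/default-content/proto"],
  pageSize: 20,
  direction: PagingDirection.BACKWARD,
)

let res = await client.query(query, peer)
if res.isOk():
  echo "fetched ", res.get().messages.len, " messages"

The `resume` helper above follows the same query path but derives its time window from the newest locally persisted message.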

View File

@ -0,0 +1,109 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, sequtils], stew/results, stew/byteutils, nimcrypto/sha2
import ../waku_core, ../common/paging
const
WakuStoreCodec* = "/vac/waku/store/2.0.0-beta4"
DefaultPageSize*: uint64 = 20
MaxPageSize*: uint64 = 100
type WakuStoreResult*[T] = Result[T, string]
## Waku message digest
type MessageDigest* = MDigest[256]
proc computeDigest*(msg: WakuMessage): MessageDigest =
var ctx: sha256
ctx.init()
defer:
ctx.clear()
ctx.update(msg.contentTopic.toBytes())
ctx.update(msg.payload)
# Computes the hash
return ctx.finish()
## Public API types
type
HistoryCursor* = object
pubsubTopic*: PubsubTopic
senderTime*: Timestamp
storeTime*: Timestamp
digest*: MessageDigest
HistoryQuery* = object
pubsubTopic*: Option[PubsubTopic]
contentTopics*: seq[ContentTopic]
cursor*: Option[HistoryCursor]
startTime*: Option[Timestamp]
endTime*: Option[Timestamp]
pageSize*: uint64
direction*: PagingDirection
HistoryResponse* = object
messages*: seq[WakuMessage]
cursor*: Option[HistoryCursor]
HistoryErrorKind* {.pure.} = enum
UNKNOWN = uint32(000)
BAD_RESPONSE = uint32(300)
BAD_REQUEST = uint32(400)
TOO_MANY_REQUESTS = uint32(429)
SERVICE_UNAVAILABLE = uint32(503)
PEER_DIAL_FAILURE = uint32(504)
HistoryError* = object
case kind*: HistoryErrorKind
of PEER_DIAL_FAILURE:
address*: string
of BAD_RESPONSE, BAD_REQUEST:
cause*: string
else:
discard
HistoryResult* = Result[HistoryResponse, HistoryError]
proc parse*(T: type HistoryErrorKind, kind: uint32): T =
case kind
of 000, 300, 400, 429, 503, 504:
HistoryErrorKind(kind)
else:
HistoryErrorKind.UNKNOWN
proc `$`*(err: HistoryError): string =
case err.kind
of HistoryErrorKind.PEER_DIAL_FAILURE:
"PEER_DIAL_FAILURE: " & err.address
of HistoryErrorKind.BAD_RESPONSE:
"BAD_RESPONSE: " & err.cause
of HistoryErrorKind.BAD_REQUEST:
"BAD_REQUEST: " & err.cause
of HistoryErrorKind.TOO_MANY_REQUESTS:
"TOO_MANY_REQUESTS"
of HistoryErrorKind.SERVICE_UNAVAILABLE:
"SERVICE_UNAVAILABLE"
of HistoryErrorKind.UNKNOWN:
"UNKNOWN"
proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] =
if self.pubsubTopic.len == 0:
return err(HistoryError(kind: BAD_REQUEST, cause: "empty pubsubTopic"))
if self.senderTime == 0:
return err(HistoryError(kind: BAD_REQUEST, cause: "invalid senderTime"))
if self.storeTime == 0:
return err(HistoryError(kind: BAD_REQUEST, cause: "invalid storeTime"))
if self.digest.data.all(
proc(x: byte): bool =
x == 0
):
return err(HistoryError(kind: BAD_REQUEST, cause: "empty digest"))
return ok()
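
The check above rejects zeroed fields, which is exactly what a default-initialized cursor looks like. A small illustration (values are arbitrary; `getNanosecondTime` and `DefaultPubsubTopic` come from waku_core):

var cursor = HistoryCursor(
  pubsubTopic: DefaultPubsubTopic,
  senderTime: getNanosecondTime(1),
  storeTime: getNanosecondTime(1),
  digest: computeDigest(WakuMessage(payload: @[byte 1])),
)
assert cursor.checkHistCursor().isOk()

cursor.pubsubTopic = ""
assert cursor.checkHistCursor().isErr() # empty pubsubTopic is rejected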

View File

@ -0,0 +1,137 @@
## Waku Store protocol for historical messaging support.
## See spec for more details:
## https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-store.md
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/options,
stew/results,
chronicles,
chronos,
bearssl/rand,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
libp2p/protobuf/minprotobuf,
libp2p/stream/connection,
metrics
import
../waku_core,
../node/peer_manager,
./common,
./rpc,
./rpc_codec,
./protocol_metrics,
../common/ratelimit,
../common/waku_service_metrics
logScope:
topics = "waku legacy store"
const MaxMessageTimestampVariance* = getNanoSecondTime(20)
# 20 seconds maximum allowable sender timestamp "drift"
type HistoryQueryHandler* =
proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.}
type WakuStore* = ref object of LPProtocol
peerManager: PeerManager
rng: ref rand.HmacDrbgContext
queryHandler*: HistoryQueryHandler
requestRateLimiter*: Option[TokenBucket]
## Protocol
proc initProtocolHandler(ws: WakuStore) =
proc handler(conn: Connection, proto: string) {.async.} =
let buf = await conn.readLp(DefaultMaxRpcSize.int)
let decodeRes = HistoryRPC.decode(buf)
if decodeRes.isErr():
error "failed to decode rpc", peerId = $conn.peerId
waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
# TODO: Return (BAD_REQUEST, cause: "decode rpc failed")
return
let reqRpc = decodeRes.value
if reqRpc.query.isNone():
error "empty query rpc", peerId = $conn.peerId, requestId = reqRpc.requestId
waku_legacy_store_errors.inc(labelValues = [emptyRpcQueryFailure])
# TODO: Return (BAD_REQUEST, cause: "empty query")
return
if ws.requestRateLimiter.isSome() and not ws.requestRateLimiter.get().tryConsume(1):
trace "store query request rejected due rate limit exceeded",
peerId = $conn.peerId, requestId = reqRpc.requestId
let error = HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS).toRPC()
let response = HistoryResponseRPC(error: error)
let rpc = HistoryRPC(requestId: reqRpc.requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
waku_service_requests_rejected.inc(labelValues = ["Store"])
return
waku_service_requests.inc(labelValues = ["Store"])
let
requestId = reqRpc.requestId
request = reqRpc.query.get().toAPI()
info "received history query",
peerId = conn.peerId, requestId = requestId, query = request
waku_legacy_store_queries.inc()
var responseRes: HistoryResult
try:
responseRes = await ws.queryHandler(request)
except Exception:
error "history query failed",
peerId = $conn.peerId, requestId = requestId, error = getCurrentExceptionMsg()
let error = HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC()
let response = HistoryResponseRPC(error: error)
let rpc = HistoryRPC(requestId: requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
return
if responseRes.isErr():
error "history query failed",
peerId = $conn.peerId, requestId = requestId, error = responseRes.error
let response = responseRes.toRPC()
let rpc = HistoryRPC(requestId: requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
return
let response = responseRes.toRPC()
info "sending history response",
peerId = conn.peerId, requestId = requestId, messages = response.messages.len
let rpc = HistoryRPC(requestId: requestId, response: some(response))
await conn.writeLp(rpc.encode().buffer)
ws.handler = handler
ws.codec = WakuStoreCodec
proc new*(
T: type WakuStore,
peerManager: PeerManager,
rng: ref rand.HmacDrbgContext,
queryHandler: HistoryQueryHandler,
rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
): T =
# Raise a defect if history query handler is nil
if queryHandler.isNil():
raise newException(NilAccessDefect, "history query handler is nil")
let ws = WakuStore(
rng: rng,
peerManager: peerManager,
queryHandler: queryHandler,
requestRateLimiter: newTokenBucket(rateLimitSetting),
)
ws.initProtocolHandler()
ws
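
As a usage note, the optional `rateLimitSetting` throttles inbound legacy queries through a token bucket. The sketch below assumes RateLimitSetting is the (volume, period) tuple from ../common/ratelimit, and that `peerManager`, `rng` and `queryHandler` already exist:

# Allow at most 30 history queries per minute (assumed tuple shape).
let limit = some((30, 1.minutes))

let legacyStore = WakuStore.new(peerManager, rng, queryHandler, limit)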

View File

@ -0,0 +1,18 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import metrics
declarePublicGauge waku_legacy_store_errors,
"number of legacy store protocol errors", ["type"]
declarePublicGauge waku_legacy_store_queries, "number of legacy store queries received"
# Error types (metric label values)
const
dialFailure* = "dial_failure"
decodeRpcFailure* = "decode_rpc_failure"
peerNotFoundFailure* = "peer_not_found_failure"
emptyRpcQueryFailure* = "empty_rpc_query_failure"
emptyRpcResponseFailure* = "empty_rpc_response_failure"

View File

@ -0,0 +1,258 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/options, nimcrypto/hash
import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc
const DefaultMaxRpcSize* = -1
## Pagination
proc encode*(index: PagingIndexRPC): ProtoBuffer =
## Encode an Index object into a ProtoBuffer
## returns the resultant ProtoBuffer
var pb = initProtoBuffer()
pb.write3(1, index.digest.data)
pb.write3(2, zint64(index.receiverTime))
pb.write3(3, zint64(index.senderTime))
pb.write3(4, index.pubsubTopic)
pb.finish3()
pb
proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] =
## creates and returns an Index object out of buffer
var rpc = PagingIndexRPC()
let pb = initProtoBuffer(buffer)
var data: seq[byte]
if not ?pb.getField(1, data):
return err(ProtobufError.missingRequiredField("digest"))
else:
var digest = MessageDigest()
for count, b in data:
digest.data[count] = b
rpc.digest = digest
var receiverTime: zint64
if not ?pb.getField(2, receiverTime):
return err(ProtobufError.missingRequiredField("receiver_time"))
else:
rpc.receiverTime = int64(receiverTime)
var senderTime: zint64
if not ?pb.getField(3, senderTime):
return err(ProtobufError.missingRequiredField("sender_time"))
else:
rpc.senderTime = int64(senderTime)
var pubsubTopic: string
if not ?pb.getField(4, pubsubTopic):
return err(ProtobufError.missingRequiredField("pubsub_topic"))
else:
rpc.pubsubTopic = pubsubTopic
ok(rpc)
proc encode*(rpc: PagingInfoRPC): ProtoBuffer =
## Encodes a PagingInfo object into a ProtoBuffer
## returns the resultant ProtoBuffer
var pb = initProtoBuffer()
pb.write3(1, rpc.pageSize)
pb.write3(2, rpc.cursor.map(encode))
pb.write3(
3,
rpc.direction.map(
proc(d: PagingDirection): uint32 =
uint32(ord(d))
),
)
pb.finish3()
pb
proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] =
## creates and returns a PagingInfo object out of buffer
var rpc = PagingInfoRPC()
let pb = initProtoBuffer(buffer)
var pageSize: uint64
if not ?pb.getField(1, pageSize):
rpc.pageSize = none(uint64)
else:
rpc.pageSize = some(pageSize)
var cursorBuffer: seq[byte]
if not ?pb.getField(2, cursorBuffer):
rpc.cursor = none(PagingIndexRPC)
else:
let cursor = ?PagingIndexRPC.decode(cursorBuffer)
rpc.cursor = some(cursor)
var direction: uint32
if not ?pb.getField(3, direction):
rpc.direction = none(PagingDirection)
else:
rpc.direction = some(PagingDirection(direction))
ok(rpc)
## Wire protocol
proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(1, rpc.contentTopic)
pb.finish3()
pb
proc decode*(T: type HistoryContentFilterRPC, buffer: seq[byte]): ProtobufResult[T] =
let pb = initProtoBuffer(buffer)
var contentTopic: ContentTopic
if not ?pb.getField(1, contentTopic):
return err(ProtobufError.missingRequiredField("content_topic"))
ok(HistoryContentFilterRPC(contentTopic: contentTopic))
proc encode*(rpc: HistoryQueryRPC): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(2, rpc.pubsubTopic)
for filter in rpc.contentFilters:
pb.write3(3, filter.encode())
pb.write3(4, rpc.pagingInfo.map(encode))
pb.write3(
5,
rpc.startTime.map(
proc(time: int64): zint64 =
zint64(time)
),
)
pb.write3(
6,
rpc.endTime.map(
proc(time: int64): zint64 =
zint64(time)
),
)
pb.finish3()
pb
proc decode*(T: type HistoryQueryRPC, buffer: seq[byte]): ProtobufResult[T] =
var rpc = HistoryQueryRPC()
let pb = initProtoBuffer(buffer)
var pubsubTopic: string
if not ?pb.getField(2, pubsubTopic):
rpc.pubsubTopic = none(string)
else:
rpc.pubsubTopic = some(pubsubTopic)
var buffs: seq[seq[byte]]
if not ?pb.getRepeatedField(3, buffs):
rpc.contentFilters = @[]
else:
for pb in buffs:
let filter = ?HistoryContentFilterRPC.decode(pb)
rpc.contentFilters.add(filter)
var pagingInfoBuffer: seq[byte]
if not ?pb.getField(4, pagingInfoBuffer):
rpc.pagingInfo = none(PagingInfoRPC)
else:
let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer)
rpc.pagingInfo = some(pagingInfo)
var startTime: zint64
if not ?pb.getField(5, startTime):
rpc.startTime = none(int64)
else:
rpc.startTime = some(int64(startTime))
var endTime: zint64
if not ?pb.getField(6, endTime):
rpc.endTime = none(int64)
else:
rpc.endTime = some(int64(endTime))
ok(rpc)
proc encode*(response: HistoryResponseRPC): ProtoBuffer =
var pb = initProtoBuffer()
for rpc in response.messages:
pb.write3(2, rpc.encode())
pb.write3(3, response.pagingInfo.map(encode))
pb.write3(4, uint32(ord(response.error)))
pb.finish3()
pb
proc decode*(T: type HistoryResponseRPC, buffer: seq[byte]): ProtobufResult[T] =
var rpc = HistoryResponseRPC()
let pb = initProtoBuffer(buffer)
var messages: seq[seq[byte]]
if ?pb.getRepeatedField(2, messages):
for pb in messages:
let message = ?WakuMessage.decode(pb)
rpc.messages.add(message)
else:
rpc.messages = @[]
var pagingInfoBuffer: seq[byte]
if ?pb.getField(3, pagingInfoBuffer):
let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer)
rpc.pagingInfo = some(pagingInfo)
else:
rpc.pagingInfo = none(PagingInfoRPC)
var error: uint32
if not ?pb.getField(4, error):
return err(ProtobufError.missingRequiredField("error"))
else:
rpc.error = HistoryResponseErrorRPC.parse(error)
ok(rpc)
proc encode*(rpc: HistoryRPC): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(1, rpc.requestId)
pb.write3(2, rpc.query.map(encode))
pb.write3(3, rpc.response.map(encode))
pb.finish3()
pb
proc decode*(T: type HistoryRPC, buffer: seq[byte]): ProtobufResult[T] =
var rpc = HistoryRPC()
let pb = initProtoBuffer(buffer)
if not ?pb.getField(1, rpc.requestId):
return err(ProtobufError.missingRequiredField("request_id"))
var queryBuffer: seq[byte]
if not ?pb.getField(2, queryBuffer):
rpc.query = none(HistoryQueryRPC)
else:
let query = ?HistoryQueryRPC.decode(queryBuffer)
rpc.query = some(query)
var responseBuffer: seq[byte]
if not ?pb.getField(3, responseBuffer):
rpc.response = none(HistoryResponseRPC)
else:
let response = ?HistoryResponseRPC.decode(responseBuffer)
rpc.response = some(response)
ok(rpc)
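
As with the v3 codec, a round trip over the legacy envelope is a quick sanity check; the values below are illustrative:

let rpc = HistoryRPC(
  requestId: "1",
  query: some(HistoryQueryRPC(pubsubTopic: some(DefaultPubsubTopic))),
)

let decoded = HistoryRPC.decode(rpc.encode().buffer)
assert decoded.isOk() and decoded.get().query.isSome()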

View File

@ -0,0 +1,31 @@
##
## This file is aimed at serving the requests that come directly
## from the 'self' node. It is expected to handle the store requests that
## come from the REST-store endpoint when those requests don't indicate
## any store-peer address.
##
## Notice that the REST-store requests normally assume that the REST
## server is acting as a store-client. In this module, we allow such a
## REST-store node to act as a store-server as well by retrieving
## its own stored messages. The typical use case for that is when
## using `nwaku-compose`, which spawns a Waku node connected to a local
## database, and the user is interested in retrieving the messages
## stored by that local store node.
##
import stew/results, chronos, chronicles
import ./protocol, ./common
proc handleSelfStoreRequest*(
self: WakuStore, histQuery: HistoryQuery
): Future[WakuStoreResult[HistoryResponse]] {.async.} =
## Handles the store requests made by the node to itself.
## Normally used in REST-store requests.
try:
let resp: HistoryResponse = (await self.queryHandler(histQuery)).valueOr:
return err("error in handleSelfStoreRequest: " & $error)
return WakuStoreResult[HistoryResponse].ok(resp)
except Exception:
return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg())