Relay Publish: Extend coverage (#2)

* small improvements

* minor adjustments

* new tests

* try with more runners

* try with more runners2

* tweaks for parallel run

* new tests

* small tweaks

* new tests

* new tests

* test remove defaults from CI

* handle empty strings for env vars in CI

* add nodekey to main node

* add more tests

* finishing touches

* fixes based on Alex suggestions

* revert unwanted change

* add new pause test
This commit is contained in:
Florin Barbu 2023-11-17 08:47:22 +02:00 committed by GitHub
parent 88b1fa3e90
commit 589368f434
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 546 additions and 156 deletions

View File

@ -11,7 +11,7 @@ on:
node1:
required: false
type: string
default: "wakuorg/nwaku:deploy-wakuv2-test"
default: "wakuorg/nwaku:latest"
node2:
required: false
type: string
@ -27,8 +27,8 @@ on:
env:
FORCE_COLOR: "1"
NODE_1: ${{ inputs.node1 || 'wakuorg/nwaku:deploy-wakuv2-test' }}
NODE_2: ${{ inputs.node2 || 'wakuorg/go-waku:latest' }}
NODE_1: ${{ inputs.node1 }}
NODE_2: ${{ inputs.node2 }}
PROTOCOL: ${{ inputs.protocol || 'REST' }}
jobs:
@ -49,7 +49,7 @@ jobs:
- run: pip install -r requirements.txt
- name: Run tests
run: pytest -n 3 --reruns 1 --alluredir=allure-results
run: pytest -n 4 --reruns 2 --alluredir=allure-results
- name: Get allure history
if: always()

View File

@ -5,3 +5,4 @@ log_cli = True
log_file = log/test.log
log_cli_format = %(asctime)s %(name)s %(levelname)s %(message)s
log_file_format = %(asctime)s %(name)s %(levelname)s %(message)s
timeout = 300

View File

@ -6,8 +6,10 @@ pre-commit
pyright
pytest
pytest-instafail
pytest-timeout
pytest-xdist
pytest-rerunfailures
python-dotenv
requests
tenacity
typeguard

View File

@ -1,13 +1,6 @@
from dataclasses import dataclass, field
from marshmallow_dataclass import class_schema
from typing import Optional
@dataclass
class MessageRpcQuery:
payload: str
contentTopic: str
timestamp: Optional[int] = None
from typing import Optional, Union
@dataclass
@ -15,9 +8,10 @@ class MessageRpcResponse:
payload: str
contentTopic: str
version: Optional[int]
timestamp: int
timestamp: Optional[int]
ephemeral: Optional[bool]
rateLimitProof: Optional[dict] = field(default_factory=dict)
meta: Optional[str]
rateLimitProof: Optional[Union[dict, str]] = field(default_factory=dict)
rate_limit_proof: Optional[dict] = field(default_factory=dict)

View File

@ -6,20 +6,23 @@ load_dotenv() # This will load environment variables from a .env file if it exi
def get_env_var(var_name, default=None):
env_var = os.getenv(var_name, default)
if env_var is not None:
print(f"{var_name}: {env_var}")
else:
if env_var in [None, ""]:
print(f"{var_name} is not set; using default value: {default}")
env_var = default
print(f"{var_name}: {env_var}")
return env_var
# Configuration constants. Need to be uppercase to appear in reports
NODE_1 = get_env_var("NODE_1", "wakuorg/nwaku:latest")
NODE_2 = get_env_var("NODE_2", "wakuorg/go-waku:latest")
LOG_DIR = get_env_var("LOG_DIR", "./log")
NODE_1 = get_env_var("NODE_1", "wakuorg/go-waku:latest")
NODE_2 = get_env_var("NODE_2", "wakuorg/nwaku:latest")
DOCKER_LOG_DIR = get_env_var("DOCKER_LOG_DIR", "./log/docker")
NETWORK_NAME = get_env_var("NETWORK_NAME", "waku")
SUBNET = get_env_var("SUBNET", "172.18.0.0/16")
IP_RANGE = get_env_var("IP_RANGE", "172.18.0.0/24")
GATEWAY = get_env_var("GATEWAY", "172.18.0.1")
DEFAULT_PUBSUBTOPIC = get_env_var("DEFAULT_PUBSUBTOPIC", "/waku/2/default-waku/proto")
DEFAULT_PUBSUB_TOPIC = get_env_var("DEFAULT_PUBSUB_TOPIC", "/waku/2/default-waku/proto")
PROTOCOL = get_env_var("PROTOCOL", "REST")
RUNNING_IN_CI = get_env_var("CI")
NODEKEY = get_env_var("NODEKEY", "30348dd51465150e04a5d9d932c72864c8967f806cce60b5d26afeca1e77eb68")
API_REQUEST_TIMEOUT = get_env_var("API_REQUEST_TIMEOUT", 10)

View File

@ -1,9 +1,10 @@
import logging
from time import sleep
from src.libs.custom_logger import get_custom_logger
import os
import base64
import allure
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
def bytes_to_hex(byte_array):
@ -24,5 +25,10 @@ def to_base64(input_data):
def attach_allure_file(file):
logger.debug("Attaching file %s", file)
logger.debug(f"Attaching file {file}")
allure.attach.file(file, name=os.path.basename(file), attachment_type=allure.attachment_type.TEXT)
def delay(num_seconds):
    """Block the current thread for num_seconds seconds, logging the pause at debug level."""
    logger.debug(f"Sleeping for {num_seconds} seconds")
    sleep(num_seconds)

24
src/libs/custom_logger.py Normal file
View File

@ -0,0 +1,24 @@
import logging
max_log_line_length = 5000
def log_length_filter(max_length):
    """Build a logging filter that drops records whose formatted message is
    longer than max_length characters.

    A short replacement notice is emitted on the same logger so the drop is
    visible in the output. Returns a logging.Filter instance suitable for
    Logger.addFilter().
    """

    class LogLengthFilter(logging.Filter):
        def filter(self, record):
            if len(record.getMessage()) > max_length:
                # Fix: report the actual limit passed to this factory, not the
                # module-level default — they can differ. The notice itself is
                # short, so it passes this filter without recursing.
                logging.getLogger(record.name).log(
                    record.levelno, f"Log line was discarded because it's longer than max_log_line_length={max_length}"
                )
                return False
            return True

    return LogLengthFilter()
def get_custom_logger(name):
    """Return a logger for *name* with the max-line-length filter attached.

    Also raises the chatty third-party "urllib3" and "docker" loggers to
    WARNING so they don't flood the test logs.
    """
    for noisy_logger in ("urllib3", "docker"):
        logging.getLogger(noisy_logger).setLevel(logging.WARNING)
    custom_logger = logging.getLogger(name)
    custom_logger.addFilter(log_length_filter(max_log_line_length))
    return custom_logger

View File

@ -1,9 +1,10 @@
import logging
import requests
from tenacity import retry, stop_after_delay, wait_fixed
from abc import ABC, abstractmethod
from src.env_vars import API_REQUEST_TIMEOUT
from src.libs.custom_logger import get_custom_logger
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
class BaseClient(ABC):
@ -11,20 +12,20 @@ class BaseClient(ABC):
# useful when running tests in parallel, where occasional network-related errors such as
# connection drops, timeouts, or temporary unavailability of a service can occur. Retrying
# ensures that such intermittent issues don't cause the tests to fail outright.
@retry(stop=stop_after_delay(2), wait=wait_fixed(0.1), reraise=True)
@retry(stop=stop_after_delay(0.5), wait=wait_fixed(0.1), reraise=True)
def make_request(self, method, url, headers=None, data=None):
logger.debug("%s call: %s with payload: %s", method.upper(), url, data)
response = requests.request(method.upper(), url, headers=headers, data=data)
logger.debug(f"{method.upper()} call: {url} with payload: {data}")
response = requests.request(method.upper(), url, headers=headers, data=data, timeout=API_REQUEST_TIMEOUT)
try:
response.raise_for_status()
except requests.HTTPError as http_err:
logger.error("HTTP error occurred: %s", http_err)
logger.error(f"HTTP error occurred: {http_err}. Response content: {response.content}")
raise
except Exception as err:
logger.error("An error occurred: %s", err)
logger.error(f"An error occurred: {err}. Response content: {response.content}")
raise
else:
logger.info("Response status code: %s", response.status_code)
logger.info(f"Response status code: {response.status_code}. Response content: {response.content}")
return response
@abstractmethod

View File

@ -1,10 +1,10 @@
import logging
from src.libs.custom_logger import get_custom_logger
import json
from dataclasses import asdict
from urllib.parse import quote
from src.node.api_clients.base_client import BaseClient
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
class REST(BaseClient):
@ -24,7 +24,7 @@ class REST(BaseClient):
return self.rest_call("post", "relay/v1/subscriptions", json.dumps(pubsub_topics))
def send_message(self, message, pubsub_topic):
return self.rest_call("post", f"relay/v1/messages/{quote(pubsub_topic, safe='')}", json.dumps(asdict(message)))
return self.rest_call("post", f"relay/v1/messages/{quote(pubsub_topic, safe='')}", json.dumps(message))
def get_messages(self, pubsub_topic):
get_messages_response = self.rest_call("get", f"relay/v1/messages/{quote(pubsub_topic, safe='')}")

View File

@ -1,9 +1,9 @@
import logging
from src.libs.custom_logger import get_custom_logger
import json
from dataclasses import asdict
from src.node.api_clients.base_client import BaseClient
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
class RPC(BaseClient):
@ -28,7 +28,7 @@ class RPC(BaseClient):
return self.rpc_call("post_waku_v2_relay_v1_subscription", [pubsub_topics])
def send_message(self, message, pubsub_topic):
return self.rpc_call("post_waku_v2_relay_v1_message", [pubsub_topic, asdict(message)])
return self.rpc_call("post_waku_v2_relay_v1_message", [pubsub_topic, message])
def get_messages(self, pubsub_topic):
get_messages_response = self.rpc_call("get_waku_v2_relay_v1_messages", [pubsub_topic])

View File

@ -1,5 +1,5 @@
import os
import logging
from src.libs.custom_logger import get_custom_logger
import random
import threading
import docker
@ -7,20 +7,20 @@ from src.env_vars import NETWORK_NAME, SUBNET, IP_RANGE, GATEWAY
from docker.types import IPAMConfig, IPAMPool
from docker.errors import NotFound
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
class DockerManager:
def __init__(self, image):
self._image = image
self._client = docker.from_env()
logger.debug("Docker client initialized with image %s", self._image)
logger.debug(f"Docker client initialized with image {self._image}")
def create_network(self, network_name=NETWORK_NAME):
logger.debug("Attempting to create or retrieve network %s", network_name)
logger.debug(f"Attempting to create or retrieve network {network_name}")
networks = self._client.networks.list(names=[network_name])
if networks:
logger.debug("Network %s already exists", network_name)
logger.debug(f"Network {network_name} already exists")
return networks[0]
network = self._client.networks.create(
@ -28,7 +28,7 @@ class DockerManager:
driver="bridge",
ipam=IPAMConfig(driver="default", pool_configs=[IPAMPool(subnet=SUBNET, iprange=IP_RANGE, gateway=GATEWAY)]),
)
logger.debug("Network %s created", network_name)
logger.debug(f"Network {network_name} created")
return network
def start_container(self, image_name, ports, args, log_path, container_ip):
@ -39,14 +39,14 @@ class DockerManager:
else:
cli_args.append(f"--{key}={value}") # Add a single command
port_bindings = {f"{port}/tcp": ("", port) for port in ports}
logger.debug("Starting container with image %s", image_name)
logger.debug("Using args %s", cli_args)
logger.debug(f"Starting container with image {image_name}")
logger.debug(f"Using args {cli_args}")
container = self._client.containers.run(image_name, command=cli_args, ports=port_bindings, detach=True, remove=True, auto_remove=True)
network = self._client.networks.get(NETWORK_NAME)
network.connect(container, ipv4_address=container_ip)
logger.debug("Container started with ID %s. Setting up logs at %s", container.short_id, log_path)
logger.debug(f"Container started with ID {container.short_id}. Setting up logs at {log_path}")
log_thread = threading.Thread(target=self._log_container_output, args=(container, log_path))
log_thread.daemon = True
log_thread.start()
@ -63,14 +63,14 @@ class DockerManager:
if base_port is None:
base_port = random.randint(1024, 65535 - count)
ports = [base_port + i for i in range(count)]
logger.debug("Generated ports %s", ports)
logger.debug(f"Generated ports {ports}")
return ports
@staticmethod
def generate_random_ext_ip():
base_ip_fragments = ["172", "18"]
ext_ip = ".".join(base_ip_fragments + [str(random.randint(0, 255)) for _ in range(2)])
logger.debug("Generated random external IP %s", ext_ip)
logger.debug(f"Generated random external IP {ext_ip}")
return ext_ip
def is_container_running(self, container):
@ -78,7 +78,7 @@ class DockerManager:
refreshed_container = self._client.containers.get(container.id)
return refreshed_container.status == "running"
except NotFound:
logger.error("Container with ID %s not found", container.id)
logger.error(f"Container with ID {container.id} not found")
return False
@property

View File

@ -1,27 +1,34 @@
import os
import logging
from src.libs.common import delay
from src.libs.custom_logger import get_custom_logger
from tenacity import retry, stop_after_delay, wait_fixed
from src.node.api_clients.rpc import RPC
from src.node.api_clients.rest import REST
from src.node.docker_mananger import DockerManager
from src.env_vars import LOG_DIR, DEFAULT_PUBSUBTOPIC, PROTOCOL
from src.env_vars import DOCKER_LOG_DIR, DEFAULT_PUBSUB_TOPIC, PROTOCOL
from src.data_storage import DS
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
class WakuNode:
def __init__(self, docker_image, docker_log_prefix=""):
self._image_name = docker_image
self._log_path = os.path.join(LOG_DIR, f"{docker_log_prefix}__{self._image_name.replace('/', '_')}.log")
self._log_path = os.path.join(DOCKER_LOG_DIR, f"{docker_log_prefix}__{self._image_name.replace('/', '_')}.log")
self._docker_manager = DockerManager(self._image_name)
self._container = None
logger.debug(f"WakuNode instance initialized with log path {self._log_path}")
@retry(stop=stop_after_delay(5), wait=wait_fixed(0.1), reraise=True)
def start(self, **kwargs):
logger.debug("Starting Node...")
self._docker_manager.create_network()
self._ext_ip = self._docker_manager.generate_random_ext_ip()
self._ports = self._docker_manager.generate_ports()
self._rest_port = self._ports[0]
self._rpc_port = self._ports[1]
self._websocket_port = self._ports[2]
logger.debug("WakuNode instance initialized with log path %s", self._log_path)
if PROTOCOL == "RPC":
self._api = RPC(self._rpc_port, self._image_name)
elif PROTOCOL == "REST":
@ -29,11 +36,6 @@ class WakuNode:
else:
raise ValueError(f"Unknown protocol: {PROTOCOL}")
@retry(stop=stop_after_delay(5), wait=wait_fixed(0.1), reraise=True)
def start(self, **kwargs):
logger.debug("Starting Node...")
self._docker_manager.create_network()
default_args = {
"listen-address": "0.0.0.0",
"rpc": "true",
@ -42,6 +44,7 @@ class WakuNode:
"rest-admin": "true",
"websocket-support": "true",
"log-level": "TRACE",
"rest-relay-cache-capacity": "100",
"websocket-port": str(self._ports[3]),
"rpc-port": self._rpc_port,
"rest-port": self._rest_port,
@ -50,12 +53,12 @@ class WakuNode:
"rpc-address": "0.0.0.0",
"rest-address": "0.0.0.0",
"nat": f"extip:{self._ext_ip}",
"pubsub-topic": DEFAULT_PUBSUBTOPIC,
"pubsub-topic": DEFAULT_PUBSUB_TOPIC,
}
if "go-waku" in self._docker_manager.image:
go_waku_args = {
"min-relay-peers-to-publish": "0",
"min-relay-peers-to-publish": "1",
"legacy-filter": "false",
"log-level": "DEBUG",
}
@ -67,35 +70,71 @@ class WakuNode:
self._container = self._docker_manager.start_container(self._docker_manager.image, self._ports, default_args, self._log_path, self._ext_ip)
logger.debug(
"Started container from image %s. RPC: %s REST: %s WebSocket: %s", self._image_name, self._rpc_port, self._rest_port, self._websocket_port
f"Started container from image {self._image_name}. RPC: {self._rpc_port} REST: {self._rest_port} WebSocket: {self._websocket_port}"
)
DS.waku_nodes.append(self)
delay(1) # if we fire requests too soon after starting, the node will sometimes fail to start correctly
try:
self.ensure_ready()
except Exception as e:
logger.error("%s service did not become ready in time: %s", PROTOCOL, e)
except Exception as ex:
logger.error(f"{PROTOCOL} service did not become ready in time: {ex}")
raise
@retry(stop=stop_after_delay(5), wait=wait_fixed(0.1), reraise=True)
def stop(self):
if self._container:
logger.debug("Stopping container with id %s", self._container.short_id)
logger.debug(f"Stopping container with id {self._container.short_id}")
self._container.stop()
logger.debug("Container stopped.")
@retry(stop=stop_after_delay(5), wait=wait_fixed(0.05), reraise=True)
def restart(self):
    """Restart the node's docker container (no-op if it was never started).

    Retried for up to 5 s — presumably to ride out transient docker
    errors; TODO confirm.
    """
    if self._container:
        logger.debug(f"Restarting container with id {self._container.short_id}")
        self._container.restart()
def pause(self):
    """Pause the node's docker container, if one is running."""
    if self._container:
        logger.debug(f"Pausing container with id {self._container.short_id}")
        self._container.pause()
def unpause(self):
    """Resume the node's docker container after a pause(), if one exists."""
    if self._container:
        logger.debug(f"Unpause container with id {self._container.short_id}")
        self._container.unpause()
@retry(stop=stop_after_delay(10), wait=wait_fixed(0.1), reraise=True)
def ensure_ready(self):
self.info()
logger.debug("RPC service is ready.")
logger.info(f"{PROTOCOL} service is ready !!")
def info(self):
    """Return the node info reported by the active API client (RPC or REST)."""
    return self._api.info()
def set_subscriptions(self, pubsub_topics=[DEFAULT_PUBSUBTOPIC]):
def set_subscriptions(self, pubsub_topics=None):
    """Subscribe the node to pubsub_topics; falls back to the default topic
    when None or empty is passed (avoids a mutable default argument)."""
    if not pubsub_topics:
        pubsub_topics = [DEFAULT_PUBSUB_TOPIC]
    return self._api.set_subscriptions(pubsub_topics)
def send_message(self, message, pubsub_topic=DEFAULT_PUBSUBTOPIC):
def send_message(self, message, pubsub_topic=DEFAULT_PUBSUB_TOPIC):
    """Publish a relay message on the given pubsub topic via the API client."""
    return self._api.send_message(message, pubsub_topic)
def get_messages(self, pubsub_topic=DEFAULT_PUBSUBTOPIC):
def get_messages(self, pubsub_topic=DEFAULT_PUBSUB_TOPIC):
    """Fetch the messages cached for the given pubsub topic via the API client."""
    return self._api.get_messages(pubsub_topic)
@property
def image(self):
    """Docker image name this node was created from."""
    return self._image_name
def type(self):
    """Return the node implementation name: "nwaku" or "gowaku".

    Raises:
        ValueError: if the docker image matches neither known implementation.
    """
    if self.is_nwaku():
        return "nwaku"
    elif self.is_gowaku():
        return "gowaku"
    else:
        raise ValueError("Unknown node type!!!")
def is_nwaku(self):
    # True when the docker image name identifies an nwaku node.
    return "nwaku" in self.image
def is_gowaku(self):
    # True when the docker image name identifies a go-waku node.
    return "go-waku" in self.image

View File

@ -1,38 +1,81 @@
import logging
from time import sleep, time
from src.libs.custom_logger import get_custom_logger
import math
from time import time
import pytest
import allure
from src.libs.common import to_base64, delay
from src.data_classes import message_rpc_response_schema
from src.env_vars import NODE_1, NODE_2
from src.env_vars import NODE_1, NODE_2, NODEKEY
from src.node.waku_node import WakuNode
from tenacity import retry, stop_after_delay, wait_fixed
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
class StepsRelay:
@pytest.fixture(scope="function", autouse=True)
def setup_nodes(self, request):
self.node1 = WakuNode(NODE_1, request.cls.test_id)
self.node1.start(relay="true", discv5_discovery="true", peer_exchange="true")
self.node1 = WakuNode(NODE_1, "node1_" + request.cls.test_id)
self.node1.start(relay="true", discv5_discovery="true", peer_exchange="true", nodekey=NODEKEY)
enr_uri = self.node1.info()["enrUri"]
self.node2 = WakuNode(NODE_2, request.cls.test_id)
self.node2 = WakuNode(NODE_2, "node2_" + request.cls.test_id)
self.node2.start(relay="true", discv5_discovery="true", discv5_bootstrap_node=enr_uri, peer_exchange="true")
self.test_pubsub_topic = "test"
self.test_content_topic = "/test/1/waku-relay"
self.test_pubsub_topic = "/waku/2/rs/18/1"
self.test_content_topic = "/test/1/waku-relay/proto"
self.test_payload = "Relay works!!"
self.node1.set_subscriptions([self.test_pubsub_topic])
self.node2.set_subscriptions([self.test_pubsub_topic])
@pytest.fixture(scope="function", autouse=True)
def network_warm_up(self, setup_nodes):
    # Publishes messages until one propagates between the two nodes (up to
    # 120 s), so each test starts against an already-connected relay mesh.
    try:
        self.wait_for_published_message_to_reach_peer(120)
        logger.info("WARM UP successful !!")
    except Exception as ex:
        raise TimeoutError(f"WARM UP FAILED WITH: {ex}")
@allure.step
@retry(stop=stop_after_delay(20), wait=wait_fixed(0.5), reraise=True)
def check_published_message_reaches_peer(self, message):
message.timestamp = int(time() * 1e9)
self.node1.send_message(message, self.test_pubsub_topic)
sleep(0.1)
get_messages_response = self.node2.get_messages(self.test_pubsub_topic)
logger.debug("Got reponse from remote peer %s", get_messages_response)
def check_published_message_reaches_peer(self, message, pubsub_topic=None, message_propagation_delay=0.1):
self.node1.send_message(message, pubsub_topic or self.test_pubsub_topic)
delay(message_propagation_delay)
get_messages_response = self.node2.get_messages(pubsub_topic or self.test_pubsub_topic)
assert get_messages_response, "Peer node couldn't find any messages"
received_message = message_rpc_response_schema.load(get_messages_response[0])
assert received_message.payload == message.payload
assert received_message.contentTopic == message.contentTopic
assert received_message.timestamp == message.timestamp
self.assert_received_message(message, received_message)
def assert_received_message(self, sent_message, received_message):
    """Compare a received relay message against the dict that was published.

    Mandatory fields (payload, contentTopic) are always checked; optional
    fields are only checked when present in the sent dict.
    """

    def assert_fail_message(field_name):
        # Uniform failure message for any mismatching field.
        return f"Incorrect field: {field_name}. Published: {sent_message[field_name]} Received: {getattr(received_message, field_name)}"

    assert received_message.payload == sent_message["payload"], assert_fail_message("payload")
    assert received_message.contentTopic == sent_message["contentTopic"], assert_fail_message("contentTopic")
    if sent_message.get("timestamp") is not None:
        if isinstance(sent_message["timestamp"], float):
            # Float timestamps can lose precision in transit — compare with a
            # relative tolerance instead of exact string equality.
            assert math.isclose(float(received_message.timestamp), sent_message["timestamp"], rel_tol=1e-9), assert_fail_message("timestamp")
        else:
            assert str(received_message.timestamp) == str(sent_message["timestamp"]), assert_fail_message("timestamp")
    if "version" in sent_message:
        assert str(received_message.version) == str(sent_message["version"]), assert_fail_message("version")
    if "meta" in sent_message:
        assert str(received_message.meta) == str(sent_message["meta"]), assert_fail_message("meta")
    if "ephemeral" in sent_message:
        assert str(received_message.ephemeral) == str(sent_message["ephemeral"]), assert_fail_message("ephemeral")
    if "rateLimitProof" in sent_message:
        assert str(received_message.rateLimitProof) == str(sent_message["rateLimitProof"]), assert_fail_message("rateLimitProof")
def wait_for_published_message_to_reach_peer(self, timeout_duration, time_between_retries=1):
    """Publish fresh messages until one is observed by the peer node.

    Retries for up to timeout_duration seconds with time_between_retries
    seconds between attempts; re-raises the last failure on timeout.
    """

    @retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(time_between_retries), reraise=True)
    def check_peer_connection():
        # Fresh timestamp per attempt so every retry publishes a new message.
        message = {"payload": to_base64(self.test_payload), "contentTopic": self.test_content_topic, "timestamp": int(time() * 1e9)}
        self.check_published_message_reaches_peer(message)

    check_peer_connection()
def ensure_subscriptions_on_nodes(self, node_list, pubsub_topic_list):
    """Subscribe every node in node_list to the given list of pubsub topics."""
    for target_node in node_list:
        target_node.set_subscriptions(pubsub_topic_list)
def create_message(self, **kwargs):
    """Build a default relay message dict; kwargs override or extend any field."""
    message = {"payload": to_base64(self.test_payload), "contentTopic": self.test_content_topic, "timestamp": int(time() * 1e9)}
    message.update(kwargs)
    return message

View File

@ -1,42 +1,100 @@
from time import time
from datetime import datetime, timedelta
from src.env_vars import DEFAULT_PUBSUB_TOPIC
NOW = datetime.now()
SAMPLE_INPUTS = [
{"description": "A simple string.", "value": "Hello World!"},
{"description": "An integer.", "value": "1234567890"},
{"description": "A dictionary.", "value": '{"key": "value"}'},
{"description": "Chinese characters.", "value": "这是一些中文"},
{"description": "Emojis.", "value": "🚀🌟✨"},
{"description": "Lorem ipsum text.", "value": "Lorem ipsum dolor sit amet"},
{"description": "HTML content.", "value": "<html><body>Hello</body></html>"},
{"description": "Cyrillic characters.", "value": "\u041f\u0440\u0438\u0432\u0435\u0442"},
{"description": "Base64 encoded string.", "value": "Base64==dGVzdA=="},
{"description": "Binary data.", "value": "d29ya2luZyB3aXRoIGJpbmFyeSBkYXRh: \x50\x51"},
{"description": "Special characters with whitespace.", "value": "\t\nSpecial\tCharacters\n"},
{"description": "Boolean false as a string.", "value": "False"},
{"description": "A float number.", "value": "3.1415926535"},
{"description": "A list.", "value": "[1, 2, 3, 4, 5]"},
{"description": "Hexadecimal number as a string.", "value": "0xDEADBEEF"},
{"description": "Email format.", "value": "user@example.com"},
{"description": "URL format.", "value": "http://example.com"},
{"description": "Date and time in ISO format.", "value": "2023-11-01T12:00:00Z"},
{"description": "String with escaped quotes.", "value": '"Escaped" \\"quotes\\"'},
{"description": "A regular expression.", "value": "Regular expression: ^[a-z0-9_-]{3,16}$"},
{"description": "A very long string.", "value": "x" * 1000},
{"description": "A JSON string.", "value": '{"name": "John", "age": 30, "city": "New York"}'},
{"description": "A Unix path.", "value": "/usr/local/bin"},
{"description": "A Windows path.", "value": "C:\\Windows\\System32"},
{"description": "An SQL query.", "value": "SELECT * FROM users WHERE id = 1;"},
{"description": "JavaScript code snippet.", "value": "function test() { console.log('Hello World'); }"},
{"description": "A CSS snippet.", "value": "body { background-color: #fff; }"},
{"description": "A Python one-liner.", "value": "print('Hello World')"},
{"description": "An IP address.", "value": "192.168.1.1"},
{"description": "A domain name.", "value": "www.example.com"},
{"description": "A user agent string.", "value": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"},
{"description": "A credit card number.", "value": "1234-5678-9012-3456"},
{"description": "A phone number.", "value": "+1234567890"},
{"description": "A UUID.", "value": "123e4567-e89b-12d3-a456-426614174000"},
{"description": "A hashtag.", "value": "#helloWorld"},
{"description": "A Twitter handle.", "value": "@username"},
{"description": "A password.", "value": "P@ssw0rd!"},
{"description": "A date in common format.", "value": "01/11/2023"},
{"description": "A time string.", "value": "12:00:00"},
{"description": "A mathematical equation.", "value": "E = mc^2"},
{"description": "A simple string", "value": "Hello World!"},
{"description": "An integer", "value": "1234567890"},
{"description": "A dictionary", "value": '{"key": "value"}'},
{"description": "Chinese characters", "value": "这是一些中文"},
{"description": "Emojis", "value": "🚀🌟✨"},
{"description": "Lorem ipsum text", "value": "Lorem ipsum dolor sit amet"},
{"description": "HTML content", "value": "<html><body>Hello</body></html>"},
{"description": "Cyrillic characters", "value": "\u041f\u0440\u0438\u0432\u0435\u0442"},
{"description": "Base64 encoded string", "value": "Base64==dGVzdA=="},
{"description": "Binary data", "value": "d29ya2luZyB3aXRoIGJpbmFyeSBkYXRh: \x50\x51"},
{"description": "Special characters with whitespace", "value": "\t\nSpecial\tCharacters\n"},
{"description": "Boolean false as a string", "value": "False"},
{"description": "A float number", "value": "3.1415926535"},
{"description": "A list", "value": "[1, 2, 3, 4, 5]"},
{"description": "Hexadecimal number as a string", "value": "0xDEADBEEF"},
{"description": "Email format", "value": "user@example.com"},
{"description": "URL format", "value": "http://example.com"},
{"description": "Date and time in ISO format", "value": "2023-11-01T12:00:00Z"},
{"description": "String with escaped quotes", "value": '"Escaped" \\"quotes\\"'},
{"description": "A regular expression", "value": "Regular expression: ^[a-z0-9_-]{3,16}$"},
{"description": "A very long string", "value": "x" * 1000},
{"description": "A JSON string", "value": '{"name": "John", "age": 30, "city": "New York"}'},
{"description": "A Unix path", "value": "/usr/local/bin"},
{"description": "A Windows path", "value": "C:\\Windows\\System32"},
{"description": "An SQL query", "value": "SELECT * FROM users WHERE id = 1;"},
{"description": "JavaScript code snippet", "value": "function test() { console.log('Hello World'); }"},
{"description": "A CSS snippet", "value": "body { background-color: #fff; }"},
{"description": "A Python one-liner", "value": "print('Hello World')"},
{"description": "An IP address", "value": "192.168.1.1"},
{"description": "A domain name", "value": "www.example.com"},
{"description": "A user agent string", "value": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"},
{"description": "A credit card number", "value": "1234-5678-9012-3456"},
{"description": "A phone number", "value": "+1234567890"},
{"description": "A UUID", "value": "123e4567-e89b-12d3-a456-426614174000"},
{"description": "A hashtag", "value": "#helloWorld"},
{"description": "A Twitter handle", "value": "@username"},
{"description": "A password", "value": "P@ssw0rd!"},
{"description": "A date in common format", "value": "01/11/2023"},
{"description": "A time string", "value": "12:00:00"},
{"description": "A mathematical equation", "value": "E = mc^2"},
]
INVALID_PAYLOADS = [
{"description": "Empty string", "value": ""},
{"description": "Unecoded text", "value": "Hello World!"},
{"description": "A dictionary", "value": {"key": "YWFh"}},
{"description": "An integer", "value": 1234567890},
{"description": "A list", "value": ["YWFh"]},
{"description": "A bool", "value": True},
]
INVALID_CONTENT_TOPICS = [
{"description": "Empty string", "value": ""},
{"description": "A dictionary", "value": {"key": "YWFh"}},
{"description": "An integer", "value": 1234567890},
{"description": "A list", "value": ["YWFh"]},
{"description": "A bool", "value": True},
]
VALID_PUBSUB_TOPICS = [
DEFAULT_PUBSUB_TOPIC,
"/waku/2/rs/18/1",
"/test/2/rs/18/1",
"/waku/3/rs/18/1",
"/waku/2/test/18/1",
"/waku/2/rs/66/1",
"/waku/2/rs/18/50",
"/waku/18/50",
"test",
]
SAMPLE_TIMESTAMPS = [
{"description": "Now", "value": int(time() * 1e9), "valid_for": ["nwaku", "gowaku"]},
{
"description": "Far future",
"value": int((NOW + timedelta(days=365 * 10)).timestamp() * 1e9),
"valid_for": ["nwaku", "gowaku"],
}, # 10 years from now
{"description": "Recent past", "value": int((NOW - timedelta(hours=1)).timestamp() * 1e9), "valid_for": ["nwaku", "gowaku"]}, # 1 hour ago
{"description": "Near future", "value": int((NOW + timedelta(hours=1)).timestamp() * 1e9), "valid_for": ["nwaku", "gowaku"]}, # 1 hour ahead
{"description": "Positive number", "value": 1, "valid_for": ["nwaku", "gowaku"]},
{"description": "Negative number", "value": -1, "valid_for": ["nwaku", "gowaku"]},
{"description": "DST change", "value": int(datetime(2020, 3, 8, 2, 0, 0).timestamp() * 1e9), "valid_for": ["nwaku", "gowaku"]}, # DST starts
{"description": "Timestamp as string number", "value": str(int(time() * 1e9)), "valid_for": []},
{"description": "Invalid large number", "value": 2**63, "valid_for": []},
{"description": "Float number", "value": float(time() * 1e9), "valid_for": []},
{"description": "Array instead of timestamp", "value": [int(time() * 1e9)], "valid_for": []},
{"description": "Object instead of timestamp", "value": {"time": int(time() * 1e9)}, "valid_for": []},
{"description": "ISO 8601 timestamp", "value": "2023-12-26T10:58:51", "valid_for": []},
{"description": "Missing", "value": None, "valid_for": ["gowaku"]},
]

View File

@ -1,4 +0,0 @@
import logging
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("docker").setLevel(logging.WARNING)

View File

@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
import glob
import logging
from src.libs.custom_logger import get_custom_logger
import os
import pytest
from datetime import datetime
from time import time
from uuid import uuid4
from src.libs.common import attach_allure_file
import src.env_vars as env_vars
from src.data_storage import DS
logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)
# See https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
@ -42,21 +43,36 @@ def test_id(request):
@pytest.fixture(scope="function", autouse=True)
def test_setup(request, test_id):
logger.debug("Running test: %s with id: %s", request.node.name, request.cls.test_id)
logger.debug(f"Running test: {request.node.name} with id: {request.cls.test_id}")
yield
for file in glob.glob(os.path.join(env_vars.DOCKER_LOG_DIR, "*")):
if os.path.getmtime(file) < time() - 3600:
logger.debug(f"Deleting old log file: {file}")
try:
os.remove(file)
except:
logger.error("Could not delete file")
@pytest.fixture(scope="function", autouse=True)
def attach_logs_on_fail(request):
yield
if request.node.rep_call.failed:
if env_vars.RUNNING_IN_CI and hasattr(request.node, "rep_call") and request.node.rep_call.failed:
logger.debug("Test failed, attempting to attach logs to the allure reports")
for file in glob.glob(os.path.join(env_vars.LOG_DIR, request.cls.test_id + "*")):
for file in glob.glob(os.path.join(env_vars.DOCKER_LOG_DIR, "*" + request.cls.test_id + "*")):
attach_allure_file(file)
@pytest.fixture(scope="function", autouse=True)
def close_open_nodes():
def close_open_nodes(attach_logs_on_fail):
DS.waku_nodes = []
yield
crashed_containers = []
for node in DS.waku_nodes:
node.stop()
try:
node.stop()
except Exception as ex:
if "No such container" in str(ex):
crashed_containers.append(node.image)
logger.error(f"Failed to stop container because of error {ex}")
assert not crashed_containers, f"Containers {crashed_containers} crashed during the test!!!"

View File

@ -1,34 +1,241 @@
import logging
from src.libs.common import to_base64
from src.data_classes import MessageRpcQuery
from src.libs.custom_logger import get_custom_logger
from time import time
from src.libs.common import delay, to_base64
from src.steps.relay import StepsRelay
from src.test_data import SAMPLE_INPUTS
from src.test_data import INVALID_CONTENT_TOPICS, INVALID_PAYLOADS, SAMPLE_INPUTS, SAMPLE_TIMESTAMPS, VALID_PUBSUB_TOPICS
from src.data_classes import message_rpc_response_schema
# Project-wide custom logger; the stale stdlib-logger assignment left over
# from the merge is removed so the module has a single logger definition.
logger = get_custom_logger(__name__)
class TestRelayPublish(StepsRelay):
def test_publish_with_valid_payloads(self):
    """Every payload in SAMPLE_INPUTS must propagate from node1 to node2."""
    failed_payloads = []
    for payload in SAMPLE_INPUTS:
        logger.debug(f'Running test with payload {payload["description"]}')
        message = self.create_message(payload=to_base64(payload["value"]))
        try:
            self.check_published_message_reaches_peer(message)
        except Exception as e:
            logger.error(f'Payload {payload["description"]} failed: {str(e)}')
            failed_payloads.append(payload["description"])
    # Collect all failures so one bad payload doesn't mask the rest.
    assert not failed_payloads, f"Payloads failed: {failed_payloads}"
def test_publish_with_invalid_payloads(self):
    """Payloads that are not valid base64 must be rejected by the send API."""
    success_payloads = []
    for payload in INVALID_PAYLOADS:
        logger.debug(f'Running test with payload {payload["description"]}')
        message = self.create_message(payload=payload["value"])
        try:
            self.node1.send_message(message, self.test_pubsub_topic)
            success_payloads.append(payload)
        except Exception as ex:
            # Node implementations differ in the exact 4xx/5xx returned.
            assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
    assert not success_payloads, f"Invalid Payloads that didn't failed: {success_payloads}"
def test_publish_with_missing_payload(self):
    """A message without a payload field must be rejected."""
    message = {"contentTopic": self.test_content_topic, "timestamp": int(time() * 1e9)}
    try:
        self.node1.send_message(message, self.test_pubsub_topic)
    except Exception as ex:
        assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
    else:
        # Raised outside the try so it is not swallowed by our own except
        # clause and re-reported as a confusing "Bad Request" assertion.
        raise AssertionError("Publish with missing payload worked!!!")
def test_publish_with_payload_less_than_one_mb(self):
    """A payload just under the 1 MB relay limit must still propagate."""
    size_bytes = 1024 * 1023
    logger.debug(f"Running test with payload length of {size_bytes} bytes")
    large_message = self.create_message(payload=to_base64("a" * size_bytes))
    # Big messages need extra time to propagate between nodes.
    self.check_published_message_reaches_peer(large_message, message_propagation_delay=2)
def test_publish_with_payload_equal_or_more_than_one_mb(self):
    """Payloads at or above the 1 MB relay limit must NOT reach the peer."""
    for payload_length in [1024 * 1024, 1024 * 1024 * 10]:
        logger.debug(f"Running test with payload length of {payload_length} bytes")
        message = self.create_message(payload=to_base64("a" * (payload_length)))
        try:
            self.check_published_message_reaches_peer(message, message_propagation_delay=2)
        except Exception as ex:
            # Expected path: the oversized message never reaches the peer.
            assert "Peer node couldn't find any messages" in str(ex)
        else:
            # Original raised a copy-pasted "Duplicate message" error here,
            # which misreported what actually went wrong.
            raise AssertionError(f"Payload of {payload_length} bytes was propagated, expected it to be dropped")
def test_publish_with_valid_content_topics(self):
    """Every content topic in SAMPLE_INPUTS must propagate between peers."""
    failed_content_topics = []
    for content_topic in SAMPLE_INPUTS:
        logger.debug(f'Running test with content topic {content_topic["description"]}')
        message = self.create_message(contentTopic=content_topic["value"])
        try:
            self.check_published_message_reaches_peer(message)
        except Exception as e:
            logger.error(f'ContentTopic {content_topic["description"]} failed: {str(e)}')
            failed_content_topics.append(content_topic)
    # Collect all failures so one bad topic doesn't mask the rest.
    assert not failed_content_topics, f"ContentTopics failed: {failed_content_topics}"
def test_publish_with_invalid_content_topics(self):
    """Invalid content topics must be rejected by node1's send API."""
    success_content_topics = []
    for content_topic in INVALID_CONTENT_TOPICS:
        # Fixed typo in the debug message ("contetn" -> "content").
        logger.debug(f'Running test with content topic {content_topic["description"]}')
        message = self.create_message(contentTopic=content_topic["value"])
        try:
            self.node1.send_message(message, self.test_pubsub_topic)
            success_content_topics.append(content_topic)
        except Exception as ex:
            assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
    assert not success_content_topics, f"Invalid Content topics that didn't failed: {success_content_topics}"
def test_publish_with_missing_content_topic(self):
    """A message without a contentTopic field must be rejected."""
    message = {"payload": to_base64(self.test_payload), "timestamp": int(time() * 1e9)}
    try:
        self.node1.send_message(message, self.test_pubsub_topic)
    except Exception as ex:
        assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
    else:
        # Raised outside the try so our own except clause can't swallow it.
        raise AssertionError("Publish with missing content_topic worked!!!")
def test_publish_on_multiple_pubsub_topics(self):
    """Messages must propagate on every subscribed pubsub topic.

    Fixed the "PubusubTopic" typo in the log and assertion messages.
    """
    self.ensure_subscriptions_on_nodes([self.node1, self.node2], VALID_PUBSUB_TOPICS)
    failed_pubsub_topics = []
    for pubsub_topic in VALID_PUBSUB_TOPICS:
        logger.debug(f"Running test with pubsub topic {pubsub_topic}")
        try:
            self.check_published_message_reaches_peer(self.create_message(), pubsub_topic=pubsub_topic)
        except Exception as e:
            logger.error(f"PubsubTopic {pubsub_topic} failed: {str(e)}")
            failed_pubsub_topics.append(pubsub_topic)
    assert not failed_pubsub_topics, f"PubsubTopics failed: {failed_pubsub_topics}"
def test_message_published_on_different_pubsub_topic_is_not_retrieved(self):
    """A message sent on topic A must not be delivered on topic B."""
    self.ensure_subscriptions_on_nodes([self.node1, self.node2], VALID_PUBSUB_TOPICS)
    self.node1.send_message(self.create_message(), VALID_PUBSUB_TOPICS[0])
    # Give the relay a moment so absence of the message is meaningful.
    delay(0.1)
    retrieved = self.node2.get_messages(VALID_PUBSUB_TOPICS[1])
    assert not retrieved, "Message was retrieved on wrong pubsub_topic"
def test_publish_on_unsubscribed_pubsub_topic(self):
    """Publishing on a topic neither node subscribed to must fail."""
    try:
        self.check_published_message_reaches_peer(self.create_message(), pubsub_topic="/waku/2/rs/19/1")
    except Exception as ex:
        assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
    else:
        # Raised outside the try so our own except clause can't swallow it.
        raise AssertionError("Publish on unsubscribed pubsub_topic worked!!!")
def test_publish_with_valid_timestamps(self):
    """Timestamps marked valid for this node implementation must propagate."""
    failed_timestamps = []
    for ts in SAMPLE_TIMESTAMPS:
        # Skip timestamps that this node implementation is expected to reject.
        if self.node1.type() not in ts["valid_for"]:
            continue
        logger.debug(f'Running test with timestamp {ts["description"]}')
        try:
            self.check_published_message_reaches_peer(self.create_message(timestamp=ts["value"]))
        except Exception as ex:
            logger.error(f'Timestamp {ts["description"]} failed: {str(ex)}')
            failed_timestamps.append(ts)
    assert not failed_timestamps, f"Timestamps failed: {failed_timestamps}"
def test_publish_with_invalid_timestamps(self):
    """Timestamps not valid for this node implementation must not propagate."""
    success_timestamps = []
    for timestamp in SAMPLE_TIMESTAMPS:
        if self.node1.type() not in timestamp["valid_for"]:
            logger.debug(f'Running test with timestamp {timestamp["description"]}')
            message = self.create_message(timestamp=timestamp["value"])
            try:
                self.check_published_message_reaches_peer(message)
                success_timestamps.append(timestamp)
            except Exception:
                # Expected: the node rejects the timestamp or the peer
                # never receives the message. (Dropped unused binding `e`.)
                pass
    assert not success_timestamps, f"Invalid Timestamps that didn't failed: {success_timestamps}"
def test_publish_with_no_timestamp(self):
    """A message without a timestamp field is still accepted and relayed."""
    bare_message = {"payload": to_base64(self.test_payload), "contentTopic": self.test_content_topic}
    self.check_published_message_reaches_peer(bare_message)
def test_publish_with_valid_version(self):
    """An integer version value is accepted and the message propagates."""
    versioned_message = self.create_message(version=10)
    self.check_published_message_reaches_peer(versioned_message)
def test_publish_with_invalid_version(self):
    """A non-integer version value must be rejected with Bad Request."""
    try:
        self.check_published_message_reaches_peer(self.create_message(version=2.1))
    except Exception as ex:
        assert "Bad Request" in str(ex)
    else:
        # Raised outside the try so our own except clause can't swallow it.
        raise AssertionError("Publish with invalid version worked!!!")
def test_publish_with_valid_meta(self):
    """A base64-encoded meta field is accepted and the message propagates."""
    message_with_meta = self.create_message(meta=to_base64(self.test_payload))
    self.check_published_message_reaches_peer(message_with_meta)
def test_publish_with_invalid_meta(self):
    """A meta field that is not base64-encoded must be rejected."""
    try:
        self.check_published_message_reaches_peer(self.create_message(meta=self.test_payload))
    except Exception as ex:
        assert "Bad Request" in str(ex)
    else:
        # Raised outside the try so our own except clause can't swallow it.
        raise AssertionError("Publish with invalid meta worked!!!")
def test_publish_with_ephemeral(self):
    """Messages propagate regardless of the ephemeral flag value.

    Fixed the "Massage" typo in the error-log message.
    """
    failed_ephemeral = []
    for ephemeral in [True, False]:
        logger.debug(f"Running test with Ephemeral {ephemeral}")
        try:
            self.check_published_message_reaches_peer(self.create_message(ephemeral=ephemeral))
        except Exception as e:
            logger.error(f"Message with Ephemeral {ephemeral} failed: {str(e)}")
            failed_ephemeral.append(ephemeral)
    assert not failed_ephemeral, f"Ephemeral that failed: {failed_ephemeral}"
def test_publish_with_rate_limit_proof(self):
    """An RLN rate-limit proof attached to the message does not block relay."""
    proof_fields = dict(
        proof=to_base64("proofData"),
        epoch=to_base64("epochData"),
        nullifier=to_base64("nullifierData"),
    )
    self.check_published_message_reaches_peer(self.create_message(rateLimitProof=proof_fields))
def test_publish_with_extra_field(self):
    """Unknown extra fields on the message are tolerated by the relay."""
    message_with_extra = self.create_message(extraField="extraValue")
    self.check_published_message_reaches_peer(message_with_extra)
def test_publish_and_retrieve_duplicate_message(self):
    """Re-publishing the exact same message must be deduplicated by relay."""
    message = self.create_message()
    self.check_published_message_reaches_peer(message)
    try:
        self.check_published_message_reaches_peer(message)
    except Exception as ex:
        # Expected: the duplicate is dropped and the peer finds nothing new.
        assert "Peer node couldn't find any messages" in str(ex)
    else:
        # Raised outside the try so our own except clause can't swallow it.
        raise AssertionError("Duplicate message was retrieved twice")
def test_publish_while_peer_is_paused(self):
    """A message published while the peer is paused is delivered after unpause."""
    message = self.create_message()
    self.node2.pause()
    self.node1.send_message(message, self.test_pubsub_topic)
    self.node2.unpause()
    stored_messages = self.node2.get_messages(self.test_pubsub_topic)
    assert stored_messages, "Peer node couldn't find any messages"
    self.assert_received_message(message, message_rpc_response_schema.load(stored_messages[0]))
def test_publish_after_node_pauses_and_pauses(self):
    """Relay keeps working after each node goes through a pause/unpause cycle."""
    self.check_published_message_reaches_peer(self.create_message())
    # Cycle node1 first, then node2, verifying delivery after each cycle.
    for node, raw_payload in ((self.node1, "M1"), (self.node2, "M2")):
        node.pause()
        node.unpause()
        self.check_published_message_reaches_peer(self.create_message(payload=to_base64(raw_payload)))
def test_publish_after_node1_restarts(self):
    """Relay recovers after the publishing node restarts and re-subscribes."""
    self.check_published_message_reaches_peer(self.create_message())
    self.node1.restart()
    # Subscriptions do not survive a restart, so re-establish them first.
    self.ensure_subscriptions_on_nodes([self.node1, self.node2], [self.test_pubsub_topic])
    self.wait_for_published_message_to_reach_peer(20)
def test_publish_after_node2_restarts(self):
    """Relay recovers after the receiving node restarts and re-subscribes."""
    self.check_published_message_reaches_peer(self.create_message())
    self.node2.restart()
    # Subscriptions do not survive a restart, so re-establish them first.
    self.ensure_subscriptions_on_nodes([self.node1, self.node2], [self.test_pubsub_topic])
    self.wait_for_published_message_to_reach_peer(20)
def test_publish_and_retrieve_100_messages(self):
    """Publish 100 sequential messages and verify count, order and payloads."""
    num_messages = 100  # if increase this number make sure to also increase rest-relay-cache-capacity flag
    expected_payloads = [to_base64(f"M_{i}") for i in range(num_messages)]
    for encoded_payload in expected_payloads:
        self.node1.send_message(self.create_message(payload=encoded_payload), self.test_pubsub_topic)
    # Let all messages propagate before reading them back in one batch.
    delay(1)
    received = self.node2.get_messages(self.test_pubsub_topic)
    assert len(received) == num_messages
    for index, received_message in enumerate(received):
        expected = expected_payloads[index]
        assert (
            received_message["payload"] == expected
        ), f'Incorrect payload at index: {index}. Published {expected} Received {received_message["payload"]}'