diff --git a/helpers/deproxy.py b/helpers/deproxy.py index 4ac81f675..6b105e30b 100644 --- a/helpers/deproxy.py +++ b/helpers/deproxy.py @@ -991,6 +991,7 @@ def __init__( self.bind_addr = bind_addr or tf_cfg.cfg.get("Client", "ip") self.error_codes = [] self.socket_family = socket_family + self._socket_options = list() def __stop_client(self): dbg(self, 4, "Stop", prefix="\t") @@ -1009,6 +1010,8 @@ def run_start(self): self.create_socket( socket.AF_INET if self.socket_family == "ipv4" else socket.AF_INET6, socket.SOCK_STREAM ) + for option in self._socket_options: + self.socket.setsockopt(option[0], option[1], option[2]) if self.bind_addr: self.bind((self.bind_addr, 0)) self.connect((self.conn_addr, self.port)) @@ -1016,6 +1019,10 @@ def run_start(self): def clear(self): self.request_buffer = "" + def add_socket_options(self, level: int, optname: int, value_: int): + """This method should be used before `run_start`.""" + self._socket_options.append((level, optname, value_)) + def set_request(self, message_chain): if message_chain: self.request = message_chain.request diff --git a/http2_general/test_max_queued_control_frames.py b/http2_general/test_max_queued_control_frames.py index 44189f834..e6f996583 100644 --- a/http2_general/test_max_queued_control_frames.py +++ b/http2_general/test_max_queued_control_frames.py @@ -4,6 +4,9 @@ __copyright__ = "Copyright (C) 2024 Tempesta Technologies, Inc." 
__license__ = "GPL2" +import socket +import time + from h2.settings import SettingCodes from hyperframe.frame import HeadersFrame, PingFrame, PriorityFrame, SettingsFrame @@ -59,20 +62,14 @@ def __update_tempesta_config(self, limit: int) -> None: limit = "" if limit == 10000 else f"max_queued_control_frames {limit};" self.get_tempesta().config.defconfig = self.get_tempesta().config.defconfig % limit - @staticmethod - def __init_connection_and_disable_readable(client) -> None: + def __init_connection_and_disable_readable(self, client) -> None: + client.update_initial_settings() + client.send_bytes(client.h2_connection.data_to_send()) + self.assertTrue(client.wait_for_ack_settings()) + client.make_request(client.create_request(method="GET", headers=[])) client.readable = lambda: False # disable socket for reading - @staticmethod - def __send_invalid_request(client) -> None: - # TODO it should be removed after correcting the log output in dmesg - client.restart() - client.readable = lambda: True # enable socket for reading - client.send_request( - client.create_request(method="GET", headers=[("x-forwarded-for", "123")]), "400" - ) - @parameterize.expand( [ param( @@ -89,41 +86,61 @@ def __send_invalid_request(client) -> None: ) @dmesg.unlimited_rate_on_tempesta_node def test(self, name, frame): - self.__update_tempesta_config(10) - self.start_all_services() + self.__update_tempesta_config(100) + client = self.get_client("deproxy") + # Set a small buffer for the client. It is needed for the stability of tests. + # Different VMs have different buffer sizes. + client.add_socket_options(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024) + + self.start_all_services() + # Request large data from the backend and disable the socket for reading. 
+ # Tempesta cannot send any data to the client until the client enables the socket for reading self.__init_connection_and_disable_readable(client) + # Wait until the client buffer is full + time.sleep(2) + # The client should send more frames for the stability of the test - for _ in range(20): # max_queued_control_frames is 10. + for _ in range(110): # max_queued_control_frames is 100. client.send_bytes(frame, expect_response=False) - self.assertTrue( - client.wait_for_connection_close(), - "TempestaFW did not block client after exceeding `max_queued_control_frames` limit.", - ) - self.__send_invalid_request(client) self.assertTrue( self.oops.find( "Warning: Too many control frames in send queue, closing connection", - cond=lambda matches: len(matches) >= 0, + dmesg.amount_positive, ), "An unexpected number of dmesg warnings", ) + client.readable = lambda: True # enable socket for reading + self.assertTrue( + client.wait_for_connection_close(), + "TempestaFW did not block client after exceeding `max_queued_control_frames` limit.", + ) @parameterize.expand( [ param(name="default_limit", limit=10000), - param(name="10_limit", limit=10), + param(name="100_limit", limit=100), ] ) @dmesg.unlimited_rate_on_tempesta_node def test_reset_stream(self, name, limit: int): self.__update_tempesta_config(limit) - self.start_all_services() + client = self.get_client("deproxy") + # Set a small buffer for the client. It is needed for the stability of tests. + # Different VMs have different buffer sizes. + client.add_socket_options(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024) + + self.start_all_services() + # Request large data from the backend and disable the socket for reading. 
+ # Tempesta cannot send any data to the client until the client enables the socket for reading self.__init_connection_and_disable_readable(client) + # Wait until the client buffer is full + time.sleep(2) + headers = [ (":method", "GET"), (":path", "/"), @@ -132,7 +149,7 @@ def test_reset_stream(self, name, limit: int): ] # the client should send more frames for stability of test - for i in range(3, limit * 10, 2): # max_queued_control_frames is 10. + for i in range(3, int(limit * 2.2), 2): hf = HeadersFrame( stream_id=i, data=client.h2_connection.encoder.encode(headers), @@ -141,15 +158,15 @@ prio = PriorityFrame(stream_id=i, depends_on=i).serialize() client.send_bytes(hf + prio, expect_response=False) - self.assertTrue( - client.wait_for_connection_close(), - "TempestaFW did not block client after exceeding `max_queued_control_frames` limit.", - ) - self.__send_invalid_request(client) self.assertTrue( self.oops.find( "Warning: Too many control frames in send queue, closing connection", - cond=lambda matches: len(matches) >= 0, + dmesg.amount_positive, ), "An unexpected number of dmesg warnings", ) + client.readable = lambda: True # enable socket for reading + self.assertTrue( + client.wait_for_connection_close(), + "TempestaFW did not block client after exceeding `max_queued_control_frames` limit.", + )