From b41dffeb640716a0dfe3273aae151f8537c3d2b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Jaramago=20Fern=C3=A1ndez?= Date: Thu, 13 Oct 2022 10:57:09 +0200 Subject: [PATCH] Fix handling of big split compressed packets --- lib/mysql_data_stream.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/mysql_data_stream.cpp b/lib/mysql_data_stream.cpp index 678978211..bfef86636 100644 --- a/lib/mysql_data_stream.cpp +++ b/lib/mysql_data_stream.cpp @@ -1082,9 +1082,13 @@ int MySQL_Data_Stream::buffer2array() { if ((datalength-progress) >= (CompPktIN.pkt.size-CompPktIN.partial)) { // we can copy till the end of the packet memcpy((char *)CompPktIN.pkt.ptr + CompPktIN.partial , _ptr+progress, CompPktIN.pkt.size - CompPktIN.partial); - CompPktIN.partial=0; + // 'progress' is required to be updated with the actual number of bytes copied into the target packet. That + // is, taking into account the already copied data, 'CompPktIN.partial'; otherwise, in case of + // split packets, we could jump over the remaining unprocessed data. progress+= CompPktIN.pkt.size - CompPktIN.partial; PSarrayIN->add(CompPktIN.pkt.ptr, CompPktIN.pkt.size); + // Reset 'partial' only after the full packet's data has been processed + CompPktIN.partial=0; CompPktIN.pkt.ptr=NULL; // sanity } else { // not enough data for the whole packet