Flink: Don't fail to serialize IcebergSourceSplit when there are too many delete files #9464

@@ -132,6 +132,14 @@ static IcebergSourceSplit deserializeV1(byte[] serialized) throws IOException {
}

byte[] serializeV2() throws IOException {
return serialize(2);
}

byte[] serializeV3() throws IOException {
return serialize(3);
}

private byte[] serialize(int version) throws IOException {
if (serializedBytesCache == null) {
DataOutputSerializer out = SERIALIZER_CACHE.get();
Collection<FileScanTask> fileScanTasks = task.tasks();
@@ -147,7 +155,7 @@ byte[] serializeV2() throws IOException {

for (FileScanTask fileScanTask : fileScanTasks) {
String taskJson = FileScanTaskParser.toJson(fileScanTask);
- out.writeUTF(taskJson);
+ writeLongUTF(out, taskJson, version);
}

serializedBytesCache = out.getCopyOfBuffer();
@@ -157,21 +165,56 @@ byte[] serializeV2() throws IOException {
return serializedBytesCache;
}

private static void writeLongUTF(DataOutputSerializer out, String taskJson, int version)
throws IOException {
switch (version) {
case 2:
out.writeUTF(taskJson);
break;
case 3:
SerializerHelper.writeLongUTF(out, taskJson);
break;
default:
throw new IllegalArgumentException("Unsupported version: " + version);
}
}

static IcebergSourceSplit deserializeV2(byte[] serialized, boolean caseSensitive)
throws IOException {
return deserialize(serialized, caseSensitive, 2);
}

static IcebergSourceSplit deserializeV3(byte[] serialized, boolean caseSensitive)
throws IOException {
return deserialize(serialized, caseSensitive, 3);
}

private static IcebergSourceSplit deserialize(
byte[] serialized, boolean caseSensitive, int version) throws IOException {
DataInputDeserializer in = new DataInputDeserializer(serialized);
int fileOffset = in.readInt();
long recordOffset = in.readLong();
int taskCount = in.readInt();

List<FileScanTask> tasks = Lists.newArrayListWithCapacity(taskCount);
for (int i = 0; i < taskCount; ++i) {
- String taskJson = in.readUTF();
+ String taskJson = readTaskJson(in, version);
FileScanTask task = FileScanTaskParser.fromJson(taskJson, caseSensitive);
tasks.add(task);
}

CombinedScanTask combinedScanTask = new BaseCombinedScanTask(tasks);
return IcebergSourceSplit.fromCombinedScanTask(combinedScanTask, fileOffset, recordOffset);
}

private static String readTaskJson(DataInputDeserializer in, int version) throws IOException {
switch (version) {
case 2:
return in.readUTF();
case 3:
return SerializerHelper.readLongUTF(in);
default:
throw new IllegalArgumentException("Unsupported version: " + version);
}
}
}
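
For orientation, the V3 payload produced by serialize(3) is laid out as follows (a sketch inferred from the serialize/deserialize code above; V2 is identical except each task JSON uses writeUTF's 2-byte length prefix, capped at 65,535 bytes):

int     fileOffset
long    recordOffset
int     taskCount
// repeated taskCount times, one per FileScanTask:
int     utfLength        // 4-byte length written by writeLongUTF
byte[]  taskJsonUtf8     // modified-UTF-8 bytes of FileScanTaskParser.toJson(fileScanTask)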
@@ -24,7 +24,7 @@

@Internal
public class IcebergSourceSplitSerializer implements SimpleVersionedSerializer<IcebergSourceSplit> {
- private static final int VERSION = 2;
+ private static final int VERSION = 3;

private final boolean caseSensitive;

@@ -39,7 +39,7 @@ public int getVersion() {

@Override
public byte[] serialize(IcebergSourceSplit split) throws IOException {
- return split.serializeV2();
+ return split.serializeV3();
}

@Override
@@ -49,6 +49,8 @@ public IcebergSourceSplit deserialize(int version, byte[] serialized) throws IOException {
return IcebergSourceSplit.deserializeV1(serialized);
case 2:
return IcebergSourceSplit.deserializeV2(serialized, caseSensitive);
case 3:
return IcebergSourceSplit.deserializeV3(serialized, caseSensitive);
default:
throw new IOException(
String.format(
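
For context, Flink pairs each serialized split with the serializer's getVersion() and passes that version back on restore, which is what keeps pre-upgrade checkpoints readable after this change. A minimal round-trip sketch (split and oldPayload are placeholders, and the boolean constructor argument is assumed from the caseSensitive field above):

// Writing: Flink records serializer.getVersion() (now 3) alongside these bytes.
IcebergSourceSplitSerializer serializer = new IcebergSourceSplitSerializer(true);
byte[] payload = serializer.serialize(split);

// Restoring a new checkpoint: the recorded version comes back in.
IcebergSourceSplit restored = serializer.deserialize(3, payload);

// Restoring a checkpoint taken before this change still hits the V2 branch.
IcebergSourceSplit legacy = serializer.deserialize(2, oldPayload);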
@@ -0,0 +1,200 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.iceberg.flink.source.split;

import java.io.IOException;
import java.io.Serializable;
import java.io.UTFDataFormatException;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

/**
* Helper class to serialize and deserialize strings whose encoded form is longer than 65K bytes.
* The inspiration is mostly taken from org.apache.flink.core.memory.DataInputDeserializer.readUTF
* and org.apache.flink.core.memory.DataOutputSerializer.writeUTF.
*/
public class SerializerHelper implements Serializable {

/**
* Similar to {@link DataOutputSerializer#writeUTF(String)}, except this supports payloads of up
* to {@code Integer.MAX_VALUE} bytes.
*
* @param out the output stream to write the string to.
* @param str the string value to be written.
* @deprecated Once Flink 1.20 is released, {@link DataOutputSerializer} will provide a method
*     that does the same thing; use that one instead.
*     <p>See <a href="https://issues.apache.org/jira/browse/FLINK-34228">FLINK-34228</a> and <a
*     href="https://github.com/apache/flink/pull/24191">https://github.com/apache/flink/pull/24191</a>
*/
@Deprecated
public static void writeLongUTF(DataOutputSerializer out, String str) throws IOException {
int strlen = str.length();
long utflen = 0;
int c;

/* use charAt instead of copying String to char array */
for (int i = 0; i < strlen; i++) {
c = str.charAt(i);
utflen += getUTFBytesSize(c);

if (utflen > Integer.MAX_VALUE) {
throw new UTFDataFormatException("Encoded string reached maximum length: " + utflen);
}
}
if (utflen > Integer.MAX_VALUE - 4) {
throw new UTFDataFormatException("Encoded string is too long: " + utflen);
}

@javrasya (Contributor, Author) commented on Apr 12, 2024:

resize is already happening in the out.writeInt function, so I don't think the following line is needed:

https://github.com/apache/flink/pull/24191/files#diff-fe4433a7c83db13761a692e0d9eb4e7e3be7a511b7aa938bb1bcbb7b70fa45ccR290

The resize function is also private, so we don't have access to it here anyway.

Contributor commented:

out.writeInt only increases the buffer by 4 to hold the integer.

I think we need the resize call here, but that poses a blocker because DataOutputSerializer#resize(int minCapacityAdd) is a private method. I am not sure this can work by just copying the read/writeLongUTF code.

if (utflen > Integer.MAX_VALUE - 4) {
    throw new UTFDataFormatException("Encoded string is too long: " + utflen);
} else if (this.position > this.buffer.length - utflen - 2) {
    resize((int) utflen + 4);
}

cc @pvary @elkhand

Contributor commented:


Wouldn't out.write(bytearr, 0, count); inside writeUTFBytes(out, str, (int) utflen); call resize for us?

https://github.com/apache/flink/blob/f75935245799471ddf025d2bab0d0d212e79088e/flink-core/src/main/java/org/apache/flink/core/memory/DataOutputSerializer.java#L119

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        if (len < 0 || off > b.length - len) {
            throw new ArrayIndexOutOfBoundsException();
        }
        if (this.position > this.buffer.length - len) {
            resize(len);
        }
        System.arraycopy(b, off, this.buffer, this.position, len);
        this.position += len;
    }

Contributor commented:


@pvary you are right.

I guess the resize call from the Flink PR is not necessary then. This is where @javrasya had the question for this thread, and I guess @javrasya's original comment was accurate after all.

if (utflen > Integer.MAX_VALUE - 4) {
    throw new UTFDataFormatException("Encoded string is too long: " + utflen);
} else if (this.position > this.buffer.length - utflen - 2) {
    resize((int) utflen + 4);
}

@javrasya (Contributor, Author) commented on Apr 16, 2024:


Exactly, as @pvary said, writeUTFBytes will already resize the array as needed. The only downside of not resizing it up front, as in the Flink PR, is that there can be two resizes: one when writing out.writeInt((int) utflen); and another at out.write(bytearr, 0, count);. This won't happen all the time, but when it does, the original array is copied to a new resized array twice by the resize function.

As an improvement, maybe I can combine these two by turning writeUTFBytes into writeUTFBytesWithSize and writing only once as out.write(bytearr, 0, count + 4) (the extra 4 bytes being the size), so that the actual byte array is resized at most once. Then it will behave exactly as in the Flink PR.

Wdyt @pvary @stevenzwu ?
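
For illustration, a minimal sketch of the proposed combination (writeUTFBytesWithSize is hypothetical and not part of this PR; it prepends the 4-byte length to the same staging buffer so that a single out.write call, and therefore at most one resize, covers both):

// Hypothetical variant of writeUTFBytes that also writes the 4-byte length,
// so DataOutputSerializer resizes at most once for the whole record.
private static void writeUTFBytesWithSize(DataOutputSerializer out, String str, int utflen)
    throws IOException {
  byte[] bytearr = new byte[utflen + 4];
  // Big-endian length prefix, matching what out.writeInt would have produced.
  bytearr[0] = (byte) ((utflen >>> 24) & 0xFF);
  bytearr[1] = (byte) ((utflen >>> 16) & 0xFF);
  bytearr[2] = (byte) ((utflen >>> 8) & 0xFF);
  bytearr[3] = (byte) (utflen & 0xFF);
  int count = 4;
  for (int i = 0; i < str.length(); i++) {
    int c = str.charAt(i);
    if (c >= 0x0001 && c <= 0x007F) {
      bytearr[count++] = (byte) c;
    } else if (c > 0x07FF) {
      bytearr[count++] = (byte) (0xE0 | ((c >> 12) & 0x0F));
      bytearr[count++] = (byte) (0x80 | ((c >> 6) & 0x3F));
      bytearr[count++] = (byte) (0x80 | (c & 0x3F));
    } else {
      bytearr[count++] = (byte) (0xC0 | ((c >> 6) & 0x1F));
      bytearr[count++] = (byte) (0x80 | (c & 0x3F));
    }
  }
  // One write, hence at most one resize of the underlying buffer.
  out.write(bytearr, 0, count);
}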

Contributor commented:


> As an improvement, maybe I can combine these two by turning writeUTFBytes into writeUTFBytesWithSize and writing only once as out.write(bytearr, 0, count + 4) (the extra 4 bytes being the size), so that the actual byte array is resized at most once.

I am not sure that is necessary. With @pvary's explanation, I think the current code is correct.

@javrasya (Contributor, Author) commented on Apr 17, 2024:


With the current code, it is possible that it resizes twice due to those two lines. I am not saying it would run incorrectly, but two resize operations mean two array copies (whenever the buffer is not big enough), which is worse than one. Or am I missing something 🤔 ?

The early resize in the Flink PR prevents that, and we don't have that behavior in this PR for the reasons mentioned earlier. My suggestion above would reduce the number of resize operations to one, since we would write both the UTF length and the UTF bytes in one go.

Contributor commented:


@javrasya: I would keep your code even with the possibility of the double resize. This is only temporary code until the Flink method becomes available, and if we do not see an actual bottleneck, I would keep the code as simple as possible.

out.writeInt((int) utflen);
writeUTFBytes(out, str, (int) utflen);
}

/**
* Similar to {@link DataInputDeserializer#readUTF()}, except this supports payloads of up to
* {@code Integer.MAX_VALUE} bytes.
*
* @param in the input stream to read the string from.
* @return the string value read from the input stream.
* @throws IOException if an I/O error occurs when reading from the input stream.
* @deprecated Once Flink 1.20 is released, {@link DataInputDeserializer} will provide a method
*     that does the same thing; use that one instead.
*     <p>See <a href="https://issues.apache.org/jira/browse/FLINK-34228">FLINK-34228</a> and <a
*     href="https://github.com/apache/flink/pull/24191">https://github.com/apache/flink/pull/24191</a>
*/
@Deprecated
public static String readLongUTF(DataInputDeserializer in) throws IOException {
int utflen = in.readInt();
byte[] bytearr = new byte[utflen];
char[] chararr = new char[utflen];

int c, char2, char3;
int count = 0;
int chararrCount = 0;

in.readFully(bytearr, 0, utflen);

while (count < utflen) {
c = (int) bytearr[count] & 0xff;
if (c > 127) {
break;
}
count++;
chararr[chararrCount++] = (char) c;
}

while (count < utflen) {
c = (int) bytearr[count] & 0xff;
switch (c >> 4) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
/* 0xxxxxxx */
count++;
chararr[chararrCount++] = (char) c;
break;
case 12:
case 13:
/* 110x xxxx 10xx xxxx */
count += 2;
if (count > utflen) {
throw new UTFDataFormatException("malformed input: partial character at end");
}
char2 = (int) bytearr[count - 1];
if ((char2 & 0xC0) != 0x80) {
throw new UTFDataFormatException("malformed input around byte " + count);
}
chararr[chararrCount++] = (char) (((c & 0x1F) << 6) | (char2 & 0x3F));
break;
case 14:
/* 1110 xxxx 10xx xxxx 10xx xxxx */
count += 3;
if (count > utflen) {
throw new UTFDataFormatException("malformed input: partial character at end");
}
char2 = (int) bytearr[count - 2];
char3 = (int) bytearr[count - 1];
if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) {
throw new UTFDataFormatException("malformed input around byte " + (count - 1));
}
chararr[chararrCount++] =
(char) (((c & 0x0F) << 12) | ((char2 & 0x3F) << 6) | (char3 & 0x3F));
break;
default:
/* 10xx xxxx, 1111 xxxx */
throw new UTFDataFormatException("malformed input around byte " + count);
}
}
// The number of chars produced may be less than utflen
return new String(chararr, 0, chararrCount);
}

private static int getUTFBytesSize(int c) {
if ((c >= 0x0001) && (c <= 0x007F)) {
return 1;
} else if (c > 0x07FF) {
return 3;
} else {
return 2;
}
}

private static void writeUTFBytes(DataOutputSerializer out, String str, int utflen)
throws IOException {
int strlen = str.length();
int c;

int len = Math.max(1024, utflen);

byte[] bytearr = new byte[len];
int count = 0;

int i;
for (i = 0; i < strlen; i++) {
c = str.charAt(i);
if (!((c >= 0x0001) && (c <= 0x007F))) {
break;
}
bytearr[count++] = (byte) c;
}

for (; i < strlen; i++) {
c = str.charAt(i);
if ((c >= 0x0001) && (c <= 0x007F)) {
bytearr[count++] = (byte) c;
} else if (c > 0x07FF) {
bytearr[count++] = (byte) (0xE0 | ((c >> 12) & 0x0F));
bytearr[count++] = (byte) (0x80 | ((c >> 6) & 0x3F));
bytearr[count++] = (byte) (0x80 | (c & 0x3F));
} else {
bytearr[count++] = (byte) (0xC0 | ((c >> 6) & 0x1F));
bytearr[count++] = (byte) (0x80 | (c & 0x3F));
}
}

out.write(bytearr, 0, count);
}
}
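
To see the new helpers end to end, a minimal round-trip sketch, assuming a JSON string whose encoding exceeds the 65,535-byte limit of writeUTF:

// Build a stand-in for a FileScanTask JSON larger than 65,535 bytes.
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 100_000; i++) {
  sb.append('x');
}
String bigJson = sb.toString();

DataOutputSerializer out = new DataOutputSerializer(1024); // grows internally as needed
SerializerHelper.writeLongUTF(out, bigJson); // out.writeUTF(bigJson) would throw UTFDataFormatException

DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
String roundTripped = SerializerHelper.readLongUTF(in);
// roundTripped.equals(bigJson) == true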