Support cancel when writing fap snapshots #9415

Merged: 58 commits, Oct 12, 2024

Commits
4c9873e
a
CalvinNeo Sep 6, 2024
6ce1cf3
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 9, 2024
c3f53a2
f
CalvinNeo Sep 10, 2024
4d036d3
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 10, 2024
93e7dbb
a
CalvinNeo Sep 10, 2024
21e7d84
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 10, 2024
e0fa695
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.cpp
CalvinNeo Sep 11, 2024
036d841
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.cpp
CalvinNeo Sep 11, 2024
8a12d16
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.h
CalvinNeo Sep 11, 2024
0e015b3
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.cpp
CalvinNeo Sep 11, 2024
5120d57
a
CalvinNeo Sep 11, 2024
de48102
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 11, 2024
2f27cb9
addr
CalvinNeo Sep 11, 2024
ea82123
a
CalvinNeo Sep 11, 2024
0cf4235
support cancel when building
CalvinNeo Sep 11, 2024
2792a48
fmt
CalvinNeo Sep 11, 2024
392bedf
a
CalvinNeo Sep 12, 2024
779cb79
a
CalvinNeo Sep 12, 2024
7bbca76
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 13, 2024
824c3d8
a
CalvinNeo Sep 14, 2024
199013e
address cmt
CalvinNeo Sep 14, 2024
33aa19e
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 14, 2024
cd95445
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 14, 2024
866910d
Update dbms/src/Storages/DeltaMerge/DeltaMergeStore.h
CalvinNeo Sep 18, 2024
945584d
Update dbms/src/Storages/DeltaMerge/DeltaMergeStore_Ingest.cpp
CalvinNeo Sep 18, 2024
8ccfc30
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 18, 2024
deb6b1d
Update dbms/src/Storages/DeltaMerge/DeltaMergeStore.h
CalvinNeo Sep 18, 2024
2cd8986
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 18, 2024
736d57e
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 18, 2024
509fd6e
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeer.cpp
CalvinNeo Sep 18, 2024
e379e28
add
CalvinNeo Sep 18, 2024
de011da
fix sig
CalvinNeo Sep 19, 2024
7c416ac
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 19, 2024
17c1369
reject too many raft log
CalvinNeo Sep 20, 2024
9739723
change all log
CalvinNeo Sep 20, 2024
82e1284
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 20, 2024
9e40d9d
fir log format
CalvinNeo Sep 20, 2024
327340c
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 20, 2024
1bc1b9b
Merge remote-tracking branch 'upstream/master' into fix-lock-segment-…
CalvinNeo Sep 20, 2024
9b457d4
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/CheckpointIngestInf…
CalvinNeo Sep 20, 2024
fce5d52
clear codes createTargetSegmentsFromCheckpoint
CalvinNeo Sep 20, 2024
e259a0b
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 20, 2024
d1f22bd
fix lock contention
CalvinNeo Sep 20, 2024
ced81e4
a
CalvinNeo Sep 23, 2024
9a1588e
make it run
CalvinNeo Sep 23, 2024
17403c8
rewrite readAllSegmentsMetaInfoInRange
CalvinNeo Sep 23, 2024
258aec3
trace
CalvinNeo Sep 23, 2024
bed9de6
first
CalvinNeo Sep 24, 2024
9af68be
Revert "first"
CalvinNeo Sep 24, 2024
8a12b2e
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 25, 2024
9c38da8
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 25, 2024
f23fb31
remove
CalvinNeo Sep 25, 2024
533e6df
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 25, 2024
e7ae2de
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 25, 2024
a9dfbd6
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 26, 2024
a19faf4
Merge branch 'master' into fix-lock-segment-cache
JaySon-Huang Oct 9, 2024
6fa4b03
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Oct 12, 2024
f16aae5
Apply suggestions from code review
JaySon-Huang Oct 12, 2024
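The diff below threads a shared cancel handle into the long-running segment-building path of Fast Add Peer (FAP), so the write stage can bail out early instead of finishing a snapshot nobody needs. The sketch here is a minimal illustration of that pattern, not TiFlash's actual `GeneralCancelHandle`: the class name and the `isCanceled()` check are taken from the diff, while the atomic flag and the worker loop are assumptions for illustration.

```cpp
#include <atomic>
#include <memory>
#include <vector>

// Minimal stand-in for the cancel handle passed through the FAP write path.
// Only isCanceled()/cancel() mirror what the diff relies on; the rest is illustrative.
struct GeneralCancelHandle
{
    std::atomic<bool> canceled{false};
    bool isCanceled() const { return canceled.load(std::memory_order_relaxed); }
    void cancel() { canceled.store(true, std::memory_order_relaxed); }
};

struct SegmentMetaInfo
{
};

// A long scan that checks the handle between units of work and returns an
// empty result when canceled, leaving cleanup to the caller.
std::vector<SegmentMetaInfo> buildSegments(
    const std::shared_ptr<GeneralCancelHandle> & cancel_handle,
    size_t total_segments)
{
    std::vector<SegmentMetaInfo> result;
    for (size_t i = 0; i < total_segments; ++i)
    {
        if (cancel_handle->isCanceled())
            return {}; // caller treats an empty result as "canceled" and cleans up
        result.push_back(SegmentMetaInfo{}); // read one segment's meta (placeholder)
    }
    return result;
}
```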
1 change: 1 addition & 0 deletions dbms/src/Common/FailPoint.cpp
@@ -147,6 +147,7 @@ namespace DB
M(pause_before_prehandle_subtask) \
M(pause_when_persist_region) \
M(pause_before_wn_establish_task) \
M(pause_when_building_fap_segments) \
M(pause_passive_flush_before_persist_region)

#define APPLY_FOR_RANDOM_FAILPOINTS(M) \
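The new `pause_when_building_fap_segments` failpoint lets tests freeze a task while it holds the cache-building lock, which is how the cancel path can be exercised deterministically. Below is a minimal sketch of how a pause-style failpoint can be modeled; it is not TiFlash's FailPoint implementation, and the `enable`/`disable`/`pause` names are assumptions for illustration.

```cpp
#include <condition_variable>
#include <mutex>
#include <string>
#include <unordered_set>

// Illustrative pause-style failpoint registry: while a name is enabled, pause() blocks.
class PauseFailPoints
{
public:
    void enable(const std::string & name)
    {
        std::lock_guard lk(mu);
        enabled.insert(name);
    }
    void disable(const std::string & name)
    {
        {
            std::lock_guard lk(mu);
            enabled.erase(name);
        }
        cv.notify_all(); // wake anything paused on this failpoint
    }
    void pause(const std::string & name)
    {
        std::unique_lock lk(mu);
        cv.wait(lk, [&] { return enabled.count(name) == 0; });
    }

private:
    std::mutex mu;
    std::condition_variable cv;
    std::unordered_set<std::string> enabled;
};
```

A test would enable the failpoint, start the FAP write stage, cancel the task while it is paused inside the builder, then disable the failpoint and assert that the builder returns an empty result.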
2 changes: 2 additions & 0 deletions dbms/src/Common/ProfileEvents.cpp
@@ -132,6 +132,8 @@
M(S3CopyObject) \
M(S3GetObjectRetry) \
M(S3PutObjectRetry) \
M(S3IORead) \
M(S3IOSeek) \
M(FileCacheHit) \
M(FileCacheMiss) \
M(FileCacheEvict) \
4 changes: 4 additions & 0 deletions dbms/src/Common/TiFlashMetrics.h
@@ -404,6 +404,7 @@ static_assert(RAFT_REGION_BIG_WRITE_THRES * 4 < RAFT_REGION_BIG_WRITE_MAX, "Inva
F(type_failed_baddata, {{"type", "failed_baddata"}}), \
F(type_failed_repeated, {{"type", "failed_repeated"}}), \
F(type_failed_build_chkpt, {{"type", "failed_build_chkpt"}}), \
F(type_reuse_chkpt_cache, {{"type", "reuse_chkpt_cache"}}), \
F(type_restore, {{"type", "restore"}}), \
F(type_succeed, {{"type", "succeed"}})) \
M(tiflash_fap_task_state, \
@@ -428,10 +429,12 @@ static_assert(RAFT_REGION_BIG_WRITE_THRES * 4 < RAFT_REGION_BIG_WRITE_MAX, "Inva
F(type_write_stage, {{"type", "write_stage"}}, ExpBucketsWithRange{0.2, 2, 120}), \
F(type_write_stage_build, {{"type", "write_stage_build"}}, ExpBucketsWithRange{0.2, 2, 120}), \
F(type_write_stage_raft, {{"type", "write_stage_raft"}}, ExpBucketsWithRange{0.2, 2, 30}), \
F(type_write_stage_wait_build, {{"type", "write_stage_wait_build"}}, ExpBucketsWithRange{0.2, 4, 120}), \
F(type_write_stage_insert, {{"type", "write_stage_insert"}}, ExpBucketsWithRange{0.2, 2, 30}), \
F(type_ingest_stage, {{"type", "ingest_stage"}}, ExpBucketsWithRange{0.2, 2, 30}), \
F(type_total, {{"type", "total"}}, ExpBucketsWithRange{0.2, 4, 300}), \
F(type_queue_stage, {{"type", "queue_stage"}}, ExpBucketsWithRange{0.2, 4, 300}), \
F(type_write_stage_read_segment, {{"type", "write_stage_read_segment"}}, ExpBucketsWithRange{0.2, 4, 120}), \
F(type_phase1_total, {{"type", "phase1_total"}}, ExpBucketsWithRange{0.2, 4, 300})) \
M(tiflash_raft_command_throughput, \
"", \
@@ -491,6 +494,7 @@ static_assert(RAFT_REGION_BIG_WRITE_THRES * 4 < RAFT_REGION_BIG_WRITE_MAX, "Inva
Histogram, \
F(type_applied_index, {{"type", "applied_index"}}, EqualWidthBuckets{0, 100, 15}), \
F(type_eager_gc_applied_index, {{"type", "eager_gc_applied_index"}}, EqualWidthBuckets{0, 100, 10}), \
F(type_unhandled_fap_raft_log, {{"type", "unhandled_fap_raft_log"}}, EqualWidthBuckets{0, 25, 80}), \
F(type_unflushed_applied_index, {{"type", "unflushed_applied_index"}}, EqualWidthBuckets{0, 100, 15})) \
M(tiflash_raft_raft_events_count, \
"Raft event counter", \
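Two new histogram series are added: `type_write_stage_wait_build` (time spent waiting for another task to build the segment cache) and `type_write_stage_read_segment` (time spent scanning segment metadata). Both use `ExpBucketsWithRange{start, factor, max}` bucket specs. Assuming the conventional semantics of start value, growth factor, and upper bound (not verified against TiFlash's implementation), the bucket boundaries can be generated like this:

```cpp
#include <vector>

// Generate exponential histogram bucket upper bounds: start, start*factor,
// start*factor^2, ..., capped at max. Semantics of ExpBucketsWithRange are assumed.
std::vector<double> expBucketsWithRange(double start, double factor, double max)
{
    std::vector<double> bounds;
    for (double b = start; b < max; b *= factor)
        bounds.push_back(b);
    bounds.push_back(max);
    return bounds;
}

// Under this assumption, ExpBucketsWithRange{0.2, 4, 120} yields roughly:
// 0.2, 0.8, 3.2, 12.8, 51.2, 120 (seconds).
```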
6 changes: 4 additions & 2 deletions dbms/src/Storages/DeltaMerge/DeltaMergeStore.h
@@ -45,7 +45,7 @@

namespace DB
{

struct GeneralCancelHandle;
struct Settings;

class Logger;
@@ -398,17 +398,19 @@ class DeltaMergeStore

Segments buildSegmentsFromCheckpointInfo(
const DMContextPtr & dm_context,
const std::shared_ptr<GeneralCancelHandle> & cancel_handle,
const DM::RowKeyRange & range,
const CheckpointInfoPtr & checkpoint_info) const;

Segments buildSegmentsFromCheckpointInfo(
const Context & db_context,
const std::shared_ptr<GeneralCancelHandle> & cancel_handle,
const DB::Settings & db_settings,
const DM::RowKeyRange & range,
const CheckpointInfoPtr & checkpoint_info)
{
auto dm_context = newDMContext(db_context, db_settings);
return buildSegmentsFromCheckpointInfo(dm_context, range, checkpoint_info);
return buildSegmentsFromCheckpointInfo(dm_context, cancel_handle, range, checkpoint_info);
}

UInt64 ingestSegmentsFromCheckpointInfo(
20 changes: 17 additions & 3 deletions dbms/src/Storages/DeltaMerge/DeltaMergeStore_Ingest.cpp
@@ -1137,6 +1137,7 @@ bool DeltaMergeStore::ingestSegmentDataIntoSegmentUsingSplit(

Segments DeltaMergeStore::buildSegmentsFromCheckpointInfo(
const DMContextPtr & dm_context,
const std::shared_ptr<GeneralCancelHandle> & cancel_handle,
const DM::RowKeyRange & range,
const CheckpointInfoPtr & checkpoint_info) const
{
@@ -1146,15 +1147,28 @@ Segments DeltaMergeStore::buildSegmentsFromCheckpointInfo(
}
LOG_INFO(
log,
"Build checkpoint from remote, store_id={} region_id={}",
"Build checkpoint from remote, store_id={} region_id={} range={}",
checkpoint_info->remote_store_id,
checkpoint_info->region_id);
checkpoint_info->region_id,
range.toDebugString());
WriteBatches wbs{*dm_context->storage_pool};
try
{
auto segment_meta_infos = Segment::readAllSegmentsMetaInfoInRange(*dm_context, range, checkpoint_info);
auto segment_meta_infos
= Segment::readAllSegmentsMetaInfoInRange(*dm_context, cancel_handle, range, checkpoint_info);
if (cancel_handle->isCanceled())
{
// Will be cleared in `FastAddPeerWrite`.
return {};
}
LOG_INFO(
log,
"Finish read all segments meta info in range, region_id={} segments_num={}",
checkpoint_info->region_id,
segment_meta_infos.size());
auto restored_segments = Segment::createTargetSegmentsFromCheckpoint( //
log,
checkpoint_info->region_id,
*dm_context,
checkpoint_info->remote_store_id,
segment_meta_infos,
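The contract in this file is that a canceled build returns an empty segment list and performs no cleanup of its own; according to the comments, the caller (`FastAddPeerImplWrite`) is responsible for discarding the partial work. Below is a hedged, caller-side sketch of that contract, with hypothetical names since the real call site is not shown in this diff.

```cpp
#include <memory>
#include <vector>

// Illustrative stand-ins; the real types live in the TiFlash code base.
struct GeneralCancelHandle
{
    bool isCanceled() const { return canceled; }
    bool canceled = false;
};
struct Segment
{
};
using Segments = std::vector<std::shared_ptr<Segment>>;

enum class FapResult
{
    Succeeded,
    Canceled
};

// Hypothetical caller: a canceled build commits nothing, so the caller simply
// drops the partial result and reports cancellation upward.
FapResult writeStage(const std::shared_ptr<GeneralCancelHandle> & cancel_handle, Segments built_segments)
{
    if (cancel_handle->isCanceled())
    {
        built_segments.clear(); // discard partial work; task-state cleanup happens here too
        return FapResult::Canceled;
    }
    // ... otherwise ingest `built_segments` and continue the FAP write stage ...
    return FapResult::Succeeded;
}
```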
189 changes: 127 additions & 62 deletions dbms/src/Storages/DeltaMerge/Segment.cpp
@@ -55,6 +55,7 @@
#include <Storages/KVStore/KVStore.h>
#include <Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.h>
#include <Storages/KVStore/TMTContext.h>
#include <Storages/KVStore/Utils/AsyncTasks.h>
#include <Storages/Page/V3/PageEntryCheckpointInfo.h>
#include <Storages/Page/V3/Universal/UniversalPageIdFormatImpl.h>
#include <Storages/Page/V3/Universal/UniversalPageStorage.h>
@@ -122,7 +123,10 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int UNKNOWN_FORMAT_VERSION;
} // namespace ErrorCodes

namespace FailPoints
{
extern const char pause_when_building_fap_segments[];
} // namespace FailPoints
namespace DM
{
String SegmentSnapshot::detailInfo() const
@@ -433,98 +437,156 @@ SegmentPtr Segment::restoreSegment( //

Segment::SegmentMetaInfos Segment::readAllSegmentsMetaInfoInRange( //
DMContext & context,
const std::shared_ptr<GeneralCancelHandle> & cancel_handle,
const RowKeyRange & target_range,
const CheckpointInfoPtr & checkpoint_info)
{
auto fap_context = context.global_context.getSharedContextDisagg()->fap_context;
RUNTIME_CHECK(checkpoint_info != nullptr);
auto log = DB::Logger::get(fmt::format(
"region_id={} keyspace={} table_id={}",
checkpoint_info->region_id,
context.keyspace_id,
context.physical_table_id));
Stopwatch sw;
SCOPE_EXIT(
{ GET_METRIC(tiflash_fap_task_duration_seconds, type_write_stage_read_segment).Observe(sw.elapsedSeconds()); });

// If cache is empty, we read from DELTA_MERGE_FIRST_SEGMENT_ID to the end and build the cache.
// Otherwise, we just read the segment that cover the range.
PageIdU64 current_segment_id = DELTA_MERGE_FIRST_SEGMENT_ID;
auto end_to_segment_id_cache = checkpoint_info->checkpoint_data_holder->getEndToSegmentIdCache(
KeyspaceTableID{context.keyspace_id, context.physical_table_id});
auto lock = end_to_segment_id_cache->lock();
bool is_cache_ready = end_to_segment_id_cache->isReady(lock);
if (is_cache_ready)
{
current_segment_id
= end_to_segment_id_cache->getSegmentIdContainingKey(lock, target_range.getStart().toRowKeyValue());
}
LOG_DEBUG(Logger::get(), "Read segment meta info from segment {}", current_segment_id);
std::vector<std::pair<DM::RowKeyValue, UInt64>> end_key_and_segment_ids;
SegmentMetaInfos segment_infos;
while (current_segment_id != 0)
{
Segment::SegmentMetaInfo segment_info;
auto target_id = UniversalPageIdFormat::toFullPageId(
UniversalPageIdFormat::toFullPrefix(context.keyspace_id, StorageType::Meta, context.physical_table_id),
current_segment_id);
auto page = checkpoint_info->temp_ps->read(target_id, nullptr, {}, false);
if unlikely (!page.isValid())

// Runs under either the write lock (when building the cache) or the read lock (when reusing it).
auto build_segments = [&](bool is_cache_ready, PageIdU64 current_segment_id)
-> std::optional<std::pair<std::vector<std::pair<DM::RowKeyValue, UInt64>>, SegmentMetaInfos>> {
// We keep a cache that records all segments of the table identified by (keyspace_id, physical_table_id).
// It lets us avoid scanning from the very first segment for every region of this table.
// If the cache is empty, we read from DELTA_MERGE_FIRST_SEGMENT_ID to the end and build the cache.
// Otherwise, we only read the segments that cover the range.
LOG_DEBUG(log, "Read segment meta info, segment_id={}", current_segment_id);

// This list of (end key, segment id) pairs is used to build the cache.
std::vector<std::pair<DM::RowKeyValue, UInt64>> end_key_and_segment_ids;
SegmentMetaInfos segment_infos;
while (current_segment_id != 0)
{
// After #7642, DELTA_MERGE_FIRST_SEGMENT_ID may not exist, however, such checkpoint won't be selected.
// If it were to be selected, the FAP task could fallback to regular snapshot.
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Can't find page id {}, keyspace={} table_id={} current_segment_id={} range={}",
target_id,
context.keyspace_id,
context.physical_table_id,
current_segment_id,
target_range.toDebugString());
if (cancel_handle->isCanceled())
{
LOG_INFO(log, "FAP is canceled when building segments, built={}", end_key_and_segment_ids.size());
// The FAP task will be cleaned up in FastAddPeerImplWrite, so returning an empty result is OK.
return std::nullopt;
}
Segment::SegmentMetaInfo segment_info;
auto target_id = UniversalPageIdFormat::toFullPageId(
UniversalPageIdFormat::toFullPrefix(context.keyspace_id, StorageType::Meta, context.physical_table_id),
current_segment_id);
auto page = checkpoint_info->temp_ps->read(target_id, nullptr, {}, false);
if unlikely (!page.isValid())
{
// After #7642, DELTA_MERGE_FIRST_SEGMENT_ID may not exist; however, such a checkpoint won't be selected.
// If it were selected, the FAP task could fall back to a regular snapshot.
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Can't find page id {}, current_segment_id={} range={}",
target_id,
current_segment_id,
target_range.toDebugString());
}
segment_info.segment_id = current_segment_id;
ReadBufferFromMemory buf(page.data.begin(), page.data.size());
readSegmentMetaInfo(buf, segment_info);
if (!is_cache_ready)
{
end_key_and_segment_ids.emplace_back(
segment_info.range.getEnd().toRowKeyValue(),
segment_info.segment_id);
}
current_segment_id = segment_info.next_segment_id;
if (!(segment_info.range.shrink(target_range).none()))
{
segment_infos.emplace_back(segment_info);
}
// If the cache is already built (so we are not building it here), stop as early as possible.
if (is_cache_ready && segment_info.range.end.value->compare(*target_range.end.value) >= 0)
{
break;
}
}
segment_info.segment_id = current_segment_id;
ReadBufferFromMemory buf(page.data.begin(), page.data.size());
readSegmentMetaInfo(buf, segment_info);
return std::make_pair(end_key_and_segment_ids, segment_infos);
};

{
// If one task is building the cache for a table, other tasks on the same table may block here waiting to read the built cache.
// If the remote reader takes a long time to retrieve data, those tasks could block for a while.
// However, once the exclusive holder is canceled due to timeout, the readers can eventually acquire the lock.
auto lock = end_to_segment_id_cache->writeLock();
// - `true`: the cache-building task is done.
// - `false`: the cache is not built yet, or another task is building it.
bool is_cache_ready = end_to_segment_id_cache->isReady(lock);
GET_METRIC(tiflash_fap_task_duration_seconds, type_write_stage_wait_build)
.Observe(sw.elapsedSecondsFromLastTime());

if (!is_cache_ready)
{
end_key_and_segment_ids.emplace_back(segment_info.range.getEnd().toRowKeyValue(), segment_info.segment_id);
}
current_segment_id = segment_info.next_segment_id;
if (!(segment_info.range.shrink(target_range).none()))
{
segment_infos.emplace_back(segment_info);
}
// if not build cache, stop as early as possible.
if (is_cache_ready && segment_info.range.end.value->compare(*target_range.end.value) >= 0)
{
break;
// We are the cache builder.
FAIL_POINT_PAUSE(FailPoints::pause_when_building_fap_segments);

auto res = build_segments(is_cache_ready, DELTA_MERGE_FIRST_SEGMENT_ID);
// After all segments are scanned, we build the cache
// so that other FAP tasks sharing the same checkpoint can reuse it.
if (!res)
return {};
auto & [end_key_and_segment_ids, segment_infos] = *res;
LOG_DEBUG(log, "Segment meta info cache has been built, num_segments={}", end_key_and_segment_ids.size());
end_to_segment_id_cache->build(lock, std::move(end_key_and_segment_ids));
return std::move(segment_infos);
}
}
if (!is_cache_ready)
{
LOG_DEBUG(
Logger::get(),
"Build cache for keyspace {} table {} with {} segments",
context.keyspace_id,
context.physical_table_id,
end_key_and_segment_ids.size());
end_to_segment_id_cache->build(lock, std::move(end_key_and_segment_ids));
// If we find the cache is already built, which is the normal case when a checkpoint is reused, ...
auto lock = end_to_segment_id_cache->readLock();
bool is_cache_ready = end_to_segment_id_cache->isReady(lock);
RUNTIME_CHECK(is_cache_ready, checkpoint_info->region_id, context.keyspace_id, context.physical_table_id);
GET_METRIC(tiflash_fap_task_result, type_reuse_chkpt_cache).Increment();
// ... then we can seek via the cache to the segment containing the range's start key, skipping reads from the first segment.
auto current_segment_id
= end_to_segment_id_cache->getSegmentIdContainingKey(lock, target_range.getStart().toRowKeyValue());
auto res = build_segments(is_cache_ready, current_segment_id);
if (!res)
return {};
return std::move(res->second);
}

if (cancel_handle->isCanceled())
{
LOG_INFO(log, "FAP is canceled when building segments");
// The FAP task will be cleaned up in FastAddPeerImplWrite, so returning an incomplete result is OK.
return {};
}
return segment_infos;
}

Segments Segment::createTargetSegmentsFromCheckpoint( //
const LoggerPtr & parent_log,
UInt64 region_id,
DMContext & context,
StoreID remote_store_id,
const SegmentMetaInfos & meta_infos,
const RowKeyRange & range,
UniversalPageStoragePtr temp_ps,
WriteBatches & wbs)
{
UNUSED(remote_store_id);
Segments segments;
for (const auto & segment_info : meta_infos)
{
LOG_DEBUG(
parent_log,
"Create segment begin. Delta id {} stable id {} range {} epoch {} next_segment_id {}",
"Create segment begin. delta_id={} stable_id={} range={} epoch={} next_segment_id={} remote_store_id={} "
"region_id={}",
segment_info.delta_id,
segment_info.stable_id,
segment_info.range.toDebugString(),
segment_info.epoch,
segment_info.next_segment_id);
segment_info.next_segment_id,
remote_store_id,
region_id);
auto stable = StableValueSpace::createFromCheckpoint(parent_log, context, temp_ps, segment_info.stable_id, wbs);
auto delta = DeltaValueSpace::createFromCheckpoint(
parent_log,
@@ -534,7 +596,7 @@ Segments Segment::createTargetSegmentsFromCheckpoint( //
segment_info.delta_id,
wbs);
auto segment = std::make_shared<Segment>(
Logger::get("Checkpoint"),
Logger::get(fmt::format("Checkpoint(region_id={})", region_id)),
segment_info.epoch,
segment_info.range.shrink(range),
segment_info.segment_id,
@@ -544,12 +606,15 @@
segments.push_back(segment);
LOG_DEBUG(
parent_log,
"Create segment end. Delta id {} stable id {} range {} epoch {} next_segment_id {}",
"Create segment end. delta_id={} stable_id={} range={} epoch={} next_segment_id={} remote_store_id={} "
"region_id={}",
segment_info.delta_id,
segment_info.stable_id,
segment_info.range.toDebugString(),
segment_info.epoch,
segment_info.next_segment_id);
segment_info.next_segment_id,
remote_store_id,
region_id);
}
return segments;
}
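The cache the builder fills is a list of `(segment end key, segment id)` pairs, built once under the write lock (`end_to_segment_id_cache->build`) and reused under the read lock (`getSegmentIdContainingKey`). A later task can then look up the first segment whose end key is greater than its range's start key and begin scanning there instead of at `DELTA_MERGE_FIRST_SEGMENT_ID`. Below is a minimal sketch of that lookup only, assuming string keys in place of `RowKeyValue` and a non-empty, sorted cache; it is not the real implementation.

```cpp
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

using SegmentId = uint64_t;

// (end key, segment id) pairs sorted by end key, as produced by the cache builder.
// Segment ranges are [start, end), ordered and contiguous, so the segment containing
// `key` is the first entry whose end key is strictly greater than `key`.
SegmentId segmentIdContainingKey(
    const std::vector<std::pair<std::string, SegmentId>> & end_key_to_segment_id,
    const std::string & key)
{
    auto it = std::upper_bound(
        end_key_to_segment_id.begin(),
        end_key_to_segment_id.end(),
        key,
        [](const std::string & k, const auto & entry) { return k < entry.first; });
    // In the real system the last segment's end key is effectively +inf;
    // fall back to the last entry here (assumes a non-empty cache).
    if (it == end_key_to_segment_id.end())
        return end_key_to_segment_id.back().second;
    return it->second;
}
```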