Merge pull request redpanda-data#15204 from vbotbuildovich/backport-pr-15193-v23.2.x-643

[v23.2.x] format exception fixes
andijcr authored Nov 29, 2023
2 parents 65e8630 + d813188 commit 5768117
Showing 4 changed files with 9 additions and 2 deletions.
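
All four changes fix the same class of bug: a log or assert message with more {} placeholders than arguments actually supplied, so the formatter throws at runtime instead of producing the message. Below is a minimal sketch of the failure mode using the {fmt} library directly; it mirrors the consensus.cc case (one placeholder, zero arguments), but the standalone program and its fmt::vformat call are illustrative only, not Redpanda's actual logging internals.

#include <fmt/format.h>
#include <iostream>

int main() {
    try {
        // One placeholder but no argument supplied. A runtime format string
        // cannot be checked at compile time, so {fmt} throws
        // fmt::format_error while rendering the message.
        std::string msg = fmt::vformat(
          "Node with id {} does not exists in current configuration",
          fmt::make_format_args());
        std::cout << msg << '\n';
    } catch (const fmt::format_error& e) {
        // Typically reports something like "argument not found".
        std::cout << "format exception: " << e.what() << '\n';
    }
    return 0;
}

Each hunk below simply appends the missing argument(s) so that the placeholder and argument counts match.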
1 change: 1 addition & 0 deletions src/v/cluster/tx_gateway_frontend.cc
@@ -1035,6 +1035,7 @@ ss::future<cluster::init_tm_tx_reply> tx_gateway_frontend::init_tm_tx(
      txlog.trace,
      "[tx_id: {}] waiting for {} topic to apper in metadata cache, "
      "retries left: {}",
+     tx_id,
      model::tx_manager_nt,
      retries);
    if (_metadata_cache.local().contains(model::tx_manager_nt)) {
3 changes: 2 additions & 1 deletion src/v/raft/consensus.cc
@@ -983,7 +983,8 @@ consensus::update_group_member(model::broker broker) {
    if (!cfg.contains_broker(broker.id())) {
        vlog(
          _ctxlog.warn,
-         "Node with id {} does not exists in current configuration");
+         "Node with id {} does not exists in current configuration",
+         broker.id());
        return ss::make_ready_future<std::error_code>(
          errc::node_does_not_exists);
    }
1 change: 1 addition & 0 deletions src/v/storage/offset_translator_state.cc
@@ -115,6 +115,7 @@ model::offset offset_translator_state::to_log_offset(
    vassert(
      interval_end_it != _last_offset2batch.begin(),
      "ntp {}: log offset search start too small: {}",
+     _ntp,
      search_start);
    auto delta = std::prev(interval_end_it)->second.next_delta;

6 changes: 5 additions & 1 deletion src/v/storage/segment_appender.cc
@@ -297,6 +297,7 @@ ss::future<> segment_appender::truncate(size_t n) {
      n <= file_byte_offset(),
      "Cannot ask to truncate at:{} which is more bytes than we have:{} - {}",
      file_byte_offset(),
+     n,
      *this);
    return hard_flush()
      .then([this, n] { return do_truncation(n); })
@@ -663,7 +664,10 @@ bool segment_appender::inflight_write::try_merge(
    // writes should form a contiguous series of writes and we only check
    // the last write for merging.
    vassert(
-     committed_offset == pco, "in try_merge writes didn't touch: {} {}");
+     committed_offset == pco,
+     "in try_merge writes didn't touch: {} {}",
+     committed_offset,
+     pco);

    // the lhs write cannot be full since then how could the next write
    // share its chunk: it must use a new chunk
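
A closing observation, not something this commit changes: when the format string is a literal known at compile time, {fmt} can report a placeholder/argument mismatch during compilation instead of throwing at runtime, for example via FMT_STRING or the C++20 compile-time checks in fmt::format. A small sketch under that assumption:

#include <fmt/format.h>

int main() {
    int retries = 5;

    // OK: one placeholder, one argument.
    auto ok = fmt::format(FMT_STRING("retries left: {}"), retries);

    // Does not compile: two placeholders, one argument. With FMT_STRING
    // (or consteval checking of literal format strings in C++20 builds)
    // the mismatch is reported by the compiler rather than thrown at runtime.
    // auto bad = fmt::format(FMT_STRING("retries left: {} of {}"), retries);

    (void)ok;
    return 0;
}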
