Merge pull request redpanda-data#23332 from redpanda-data/clang-format-18-upgrade

chore: upgrade to clang-format-18
dotnwat authored Sep 17, 2024
2 parents dff91ba + fc82200 commit d71cd38
Showing 175 changed files with 761 additions and 710 deletions.
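
Most of the churn below is mechanical reflow from the new formatter: wrapped constructor-initializer arguments (and lambda bodies nested inside them) are re-indented, and declarations that exceed the column limit are re-wrapped at different break points. A self-contained sketch of the declaration re-wrapping seen in partition_manifest.cc below, with stub types standing in for the real ones:

// illustrative stub types -- not the real definitions from this commit
struct partition_manifest {};
struct partition_manifest_serde {};

// clang-format-16 broke the overlong declaration before the function name:
//
//   static auto
//   partition_manifest_serde_from_partition_manifest(partition_manifest const& m)
//     -> partition_manifest_serde;
//
// clang-format-18 breaks after the open parenthesis instead:
static auto partition_manifest_serde_from_partition_manifest(
  partition_manifest const& m) -> partition_manifest_serde {
    return partition_manifest_serde{};
}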
14 changes: 6 additions & 8 deletions .github/workflows/lint-cpp.yml
@@ -25,16 +25,14 @@ jobs:
name: Lint files with clang-format
runs-on: ubuntu-latest
steps:

- name: Checkout code
uses: actions/checkout@v4

- name: Install Bazelisk
run: |
mkdir -v -p $HOME/.local/bin
wget -O $HOME/.local/bin/bazel https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-linux-amd64
chmod +x $HOME/.local/bin/bazel
- name: Run clang-format
run: |
docker run \
-v $PWD:/redpanda ubuntu:noble \
bash -c "cd /redpanda && \
apt update && \
apt install -y git clang-format-16 && \
find . -type f -regex '.*\.\(cpp\|h\|hpp\|cc\|proto\|java\)' | xargs -n1 clang-format-16 -i -style=file -fallback-style=none"
bazel run //tools:clang_format
git diff --exit-code
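
The reworked job drops the ad-hoc clang-format-16 container and delegates formatting to a Bazel target, so the CI check can be reproduced locally. A minimal sketch, assuming Bazelisk is installed as bazel on the PATH (as in the install step above) and the working directory is the repository root:

# re-run the lint the way CI now does: format in place, then fail on any drift
bazel run //tools:clang_format
git diff --exit-code   # a non-zero exit means some files were reformatted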
4 changes: 2 additions & 2 deletions src/v/cloud_io/io_resources.cc
@@ -194,8 +194,8 @@ ss::future<std::optional<device_throughput>> get_storage_device_throughput() {

io_resources::io_resources()
: _max_concurrent_hydrations_per_shard(
config::shard_local_cfg()
.cloud_storage_max_concurrent_hydrations_per_shard.bind())
config::shard_local_cfg()
.cloud_storage_max_concurrent_hydrations_per_shard.bind())
, _hydration_units(max_parallel_hydrations(), "cst_hydrations")
, _throughput_limit(
// apply shard limit to downloads
2 changes: 1 addition & 1 deletion src/v/cloud_roles/aws_refresh_impl.cc
@@ -61,7 +61,7 @@ aws_refresh_impl::aws_refresh_impl(
ss::abort_source& as,
retry_params retry_params)
: refresh_credentials::impl(
std::move(address), std::move(region), as, retry_params) {}
std::move(address), std::move(region), as, retry_params) {}

bool aws_refresh_impl::is_fallback_required(const api_request_error& response) {
return std::find(
2 changes: 1 addition & 1 deletion src/v/cloud_roles/aws_sts_refresh_impl.cc
@@ -93,7 +93,7 @@ aws_sts_refresh_impl::aws_sts_refresh_impl(
ss::abort_source& as,
retry_params retry_params)
: refresh_credentials::impl(
std::move(address), std::move(region), as, retry_params)
std::move(address), std::move(region), as, retry_params)
, _role{load_from_env(aws_injected_env_vars::role_arn)}
, _token_file_path{load_from_env(aws_injected_env_vars::token_file_path)} {}

47 changes: 24 additions & 23 deletions src/v/cloud_roles/azure_aks_refresh_impl.cc
@@ -52,29 +52,30 @@ azure_aks_refresh_impl::azure_aks_refresh_impl(
ss::abort_source& as,
retry_params retry_params)
: refresh_credentials::impl(
[&] {
if (!address.host().empty()) {
// non-empty host: it's an override that we should use
return std::move(address);
}

// try to interpret AZURE_AUTHORITY_HOST as a URL, if it fails,
// assume it's an hostname
auto authority_host = load_from_env(env_var_azure_authority_host);
if (auto url = ada::parse<ada::url>(authority_host); url.has_value()) {
auto is_https = url->get_protocol() == "https:";
// use port if it's set, otherwise fallback on the default 443 for
// https and 80 for http
auto port = url->port.value_or(is_https ? default_port : 80);
auto hostname = url->get_hostname();
return net::unresolved_address{hostname, port};
}

return net::unresolved_address{authority_host, default_port};
}(),
std::move(region),
as,
retry_params)
[&] {
if (!address.host().empty()) {
// non-empty host: it's an override that we should use
return std::move(address);
}

// try to interpret AZURE_AUTHORITY_HOST as a URL, if it fails,
// assume it's an hostname
auto authority_host = load_from_env(env_var_azure_authority_host);
if (auto url = ada::parse<ada::url>(authority_host);
url.has_value()) {
auto is_https = url->get_protocol() == "https:";
// use port if it's set, otherwise fallback on the default 443 for
// https and 80 for http
auto port = url->port.value_or(is_https ? default_port : 80);
auto hostname = url->get_hostname();
return net::unresolved_address{hostname, port};
}

return net::unresolved_address{authority_host, default_port};
}(),
std::move(region),
as,
retry_params)
, client_id_{load_from_env(env_var_azure_client_id)}
, tenant_id_{load_from_env(env_var_azure_tenant_id)}
, federated_token_file_{load_from_env(env_var_azure_federated_token_file)} {}
2 changes: 1 addition & 1 deletion src/v/cloud_roles/azure_vm_refresh_impl.cc
@@ -24,7 +24,7 @@ azure_vm_refresh_impl::azure_vm_refresh_impl(
ss::abort_source& as,
retry_params retry_params)
: refresh_credentials::impl(
std::move(address), std::move(region), as, retry_params) {}
std::move(address), std::move(region), as, retry_params) {}

std::ostream& azure_vm_refresh_impl::print(std::ostream& os) const {
fmt::print(os, "azure_vm_refresh_impl{{address:{}}}", address());
2 changes: 1 addition & 1 deletion src/v/cloud_roles/gcp_refresh_impl.cc
@@ -41,7 +41,7 @@ gcp_refresh_impl::gcp_refresh_impl(
ss::abort_source& as,
retry_params retry_params)
: refresh_credentials::impl(
std::move(address), std::move(region), as, retry_params) {}
std::move(address), std::move(region), as, retry_params) {}

ss::future<api_response> gcp_refresh_impl::fetch_credentials() {
http::client::request_header oauth_req;
5 changes: 2 additions & 3 deletions src/v/cloud_storage/partition_manifest.cc
@@ -2561,9 +2561,8 @@ static_assert(

// construct partition_manifest_serde while keeping
// std::is_aggregate<partition_manifest_serde> true
static auto
partition_manifest_serde_from_partition_manifest(partition_manifest const& m)
-> partition_manifest_serde {
static auto partition_manifest_serde_from_partition_manifest(
partition_manifest const& m) -> partition_manifest_serde {
partition_manifest_serde tmp{};
// copy every field that is not segment_meta_cstore in
// partition_manifest_serde, and uses to_iobuf for segment_meta_cstore
2 changes: 1 addition & 1 deletion src/v/cloud_storage/remote_file.cc
@@ -36,7 +36,7 @@ remote_file::remote_file(
, _rtc(&retry_parent)
, _ctxlog(cst_log, _rtc, std::move(log_prefix))
, _metrics(std::move(metrics))
, _cache_backoff_jitter(cache_thrash_backoff){};
, _cache_backoff_jitter(cache_thrash_backoff) {};

ss::future<ss::file> remote_file::hydrate_readable_file() {
ss::gate::holder g(_gate);
8 changes: 4 additions & 4 deletions src/v/cloud_storage/tests/cloud_storage_e2e_test.cc
@@ -52,8 +52,8 @@ class EndToEndFixture
public:
EndToEndFixture()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
// No expectations: tests will PUT and GET organically.
set_expectations_and_listen({});
wait_for_controller_leadership().get();
@@ -367,8 +367,8 @@ class CloudStorageEndToEndManualTest
static constexpr auto segs_per_spill = 10;
CloudStorageEndToEndManualTest()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
// No expectations: tests will PUT and GET organically.
set_expectations_and_listen({});
wait_for_controller_leadership().get();
4 changes: 2 additions & 2 deletions src/v/cloud_storage/tests/delete_records_e2e_test.cc
@@ -79,8 +79,8 @@ class delete_records_e2e_fixture
static constexpr auto segs_per_spill = 10;
delete_records_e2e_fixture()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
// No expectations: tests will PUT and GET organically.
set_expectations_and_listen({});
wait_for_controller_leadership().get();
4 changes: 2 additions & 2 deletions src/v/cloud_storage/tests/manual_fixture.h
@@ -27,8 +27,8 @@ class cloud_storage_manual_multinode_test_base
public:
cloud_storage_manual_multinode_test_base()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
// No expectations: tests will PUT and GET organically.
set_expectations_and_listen({});

4 changes: 2 additions & 2 deletions src/v/cloud_storage/tests/read_replica_test.cc
@@ -33,8 +33,8 @@ class read_replica_e2e_fixture
public:
read_replica_e2e_fixture()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
// No expectations: tests will PUT and GET organically.
set_expectations_and_listen({});
wait_for_controller_leadership().get();
8 changes: 4 additions & 4 deletions src/v/cloud_storage/tests/remote_path_provider_test.cc
@@ -337,10 +337,10 @@ class LabelParamRemotePathProviderTest : public ::testing::TestWithParam<bool> {
public:
LabelParamRemotePathProviderTest()
: path_provider(
GetParam() ? std::make_optional<remote_label>(
model::cluster_uuid{uuid_t::create()})
: std::nullopt,
std::nullopt) {}
GetParam() ? std::make_optional<remote_label>(
model::cluster_uuid{uuid_t::create()})
: std::nullopt,
std::nullopt) {}

protected:
const remote_path_provider path_provider;
2 changes: 1 addition & 1 deletion src/v/cloud_storage/tests/remote_test.cc
@@ -106,7 +106,7 @@ template<model::cloud_storage_backend backend>
struct backend_override_mixin_t {
backend_override_mixin_t()
: _default_backend(
config::shard_local_cfg().cloud_storage_backend.value()) {
config::shard_local_cfg().cloud_storage_backend.value()) {
config::shard_local_cfg().cloud_storage_backend.set_value(
model::cloud_storage_backend::google_s3_compat);
}
4 changes: 2 additions & 2 deletions src/v/cloud_storage/tests/s3_imposter.cc
@@ -58,8 +58,8 @@ ss::sstring list_objects_resp(
// delimiter.
auto max_keys = max_keys_opt.has_value()
? std::min(
max_keys_opt.value(),
s3_imposter_fixture::default_max_keys)
max_keys_opt.value(),
s3_imposter_fixture::default_max_keys)
: s3_imposter_fixture::default_max_keys;
auto it = (continuation_token_opt.has_value())
? objects.find(continuation_token_opt.value())
@@ -43,7 +43,8 @@ class TopicRecoveryFixture
public:
TopicRecoveryFixture()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{}, httpd_port_number())
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number())
, bucket(cloud_storage_clients::bucket_name("test-bucket")) {
set_expectations_and_listen({});
wait_for_controller_leadership().get();
4 changes: 2 additions & 2 deletions src/v/cloud_storage/tests/topic_recovery_service_test.cc
@@ -142,8 +142,8 @@ class fixture
public:
fixture()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
redpanda_thread_fixture::init_cloud_storage_tag{},
httpd_port_number()) {
// This test will manually set expectations for list requests.
set_search_on_get_list(false);
}
8 changes: 4 additions & 4 deletions src/v/cloud_storage_clients/abs_client.cc
@@ -422,8 +422,8 @@ abs_client::abs_client(
const abs_configuration& conf,
ss::lw_shared_ptr<const cloud_roles::apply_credentials> apply_credentials)
: _data_lake_v2_client_config(
conf.is_hns_enabled ? std::make_optional(conf.make_adls_configuration())
: std::nullopt)
conf.is_hns_enabled ? std::make_optional(conf.make_adls_configuration())
: std::nullopt)
, _is_oauth(apply_credentials->is_oauth())
, _requestor(conf, std::move(apply_credentials))
, _client(conf)
@@ -439,8 +439,8 @@ abs_client::abs_client(
const ss::abort_source& as,
ss::lw_shared_ptr<const cloud_roles::apply_credentials> apply_credentials)
: _data_lake_v2_client_config(
conf.is_hns_enabled ? std::make_optional(conf.make_adls_configuration())
: std::nullopt)
conf.is_hns_enabled ? std::make_optional(conf.make_adls_configuration())
: std::nullopt)
, _is_oauth(apply_credentials->is_oauth())
, _requestor(conf, std::move(apply_credentials))
, _client(conf, &as, conf._probe, conf.max_idle_time)
2 changes: 1 addition & 1 deletion src/v/cloud_storage_clients/client_pool.cc
@@ -296,7 +296,7 @@ client_pool::acquire(ss::abort_source& as) {
// sid1 == sid2 if we have only two shards
auto cnt2 = sid1 == sid2 ? cnt1
: co_await container().invoke_on(
sid2, clients_in_use);
sid2, clients_in_use);
auto [sid, cnt] = cnt1 < cnt2 ? std::tie(sid1, cnt1)
: std::tie(sid2, cnt2);
vlog(
16 changes: 8 additions & 8 deletions src/v/cluster/archival/archiver_manager.cc
@@ -691,13 +691,13 @@ struct managed_partition : public managed_partition_fsm::state_machine_t {
cloud_storage::cache& cache,
archival::upload_housekeeping_service& housekeeping)
: managed_partition_fsm::state_machine_t(
ntp,
broker_id,
std::move(part),
std::move(config),
remote,
cache,
housekeeping)
ntp,
broker_id,
std::move(part),
std::move(config),
remote,
cache,
housekeeping)
, _ntp(ntp)
, _node_id(broker_id) {}

@@ -1022,7 +1022,7 @@ archiver_manager::archiver_manager(
ss::sharded<archival::upload_housekeeping_service>& upload_housekeeping,
ss::lw_shared_ptr<const configuration> config)
: _impl(std::make_unique<impl>(
node_id, pm, gm, api, cache, upload_housekeeping, config)) {}
node_id, pm, gm, api, cache, upload_housekeeping, config)) {}

archiver_manager::~archiver_manager() {}

2 changes: 1 addition & 1 deletion src/v/cluster/archival/tests/async_data_uploader_test.cc
@@ -87,7 +87,7 @@ class async_data_uploader_fixture : public redpanda_thread_fixture {
public:
async_data_uploader_fixture()
: redpanda_thread_fixture(
redpanda_thread_fixture::init_cloud_storage_tag{}) {
redpanda_thread_fixture::init_cloud_storage_tag{}) {
wait_for_controller_leadership().get();
}

5 changes: 2 additions & 3 deletions src/v/cluster/archival/tests/service_fixture.cc
@@ -136,9 +136,8 @@ segment_layout write_random_batches(
.offset = seg->offsets().get_committed_offset()
== model::offset{}
? seg->offsets().get_base_offset()
: (
seg->offsets().get_committed_offset()
+ model::offset_delta{1}),
: (seg->offsets().get_committed_offset()
+ model::offset_delta{1}),
.allow_compression = true,
.count = full_batches_count,
.records = records_per_batch,
2 changes: 1 addition & 1 deletion src/v/cluster/archival/types.cc
@@ -85,7 +85,7 @@ get_archival_service_config(ss::scheduling_group sg, ss::io_priority_class p) {
"cloud_storage_segment_max_upload_interval_sec is invalid");
}
auto time_limit_opt = time_limit ? std::make_optional(
segment_time_limit(*time_limit))
segment_time_limit(*time_limit))
: std::nullopt;

const auto& bucket_config
6 changes: 3 additions & 3 deletions src/v/cluster/archival/upload_controller.cc
@@ -34,9 +34,9 @@ upload_controller::upload_controller(
ss::sharded<cluster::partition_manager>& partition_manager,
storage::backlog_controller_config cfg)
: _ctrl(
std::make_unique<upload_backlog_sampler>(partition_manager),
upload_ctrl_log,
cfg) {
std::make_unique<upload_backlog_sampler>(partition_manager),
upload_ctrl_log,
cfg) {
_ctrl.setup_metrics("archival:upload");
}

2 changes: 1 addition & 1 deletion src/v/cluster/client_quota_backend.h
@@ -19,7 +19,7 @@ namespace cluster::client_quota {
class backend final {
public:
explicit backend(ss::sharded<store>& quotas)
: _quotas(quotas){};
: _quotas(quotas) {};

static constexpr auto commands
= make_commands_list<alter_quotas_delta_cmd>();
24 changes: 12 additions & 12 deletions src/v/cluster/cloud_metadata/offsets_recovery_router.h
@@ -64,18 +64,18 @@ class offsets_recovery_router
ss::sharded<partition_leaders_table>& leaders,
const model::node_id node_id)
: leader_router<
offsets_recovery_request,
offsets_recovery_reply,
offsets_recovery_handler>(
shard_table,
metadata_cache,
connection_cache,
leaders,
_handler,
node_id,
config::shard_local_cfg()
.cloud_storage_cluster_metadata_retries.value(),
5s)
offsets_recovery_request,
offsets_recovery_reply,
offsets_recovery_handler>(
shard_table,
metadata_cache,
connection_cache,
leaders,
_handler,
node_id,
config::shard_local_cfg()
.cloud_storage_cluster_metadata_retries.value(),
5s)
, _handler(offsets_recoverer) {}

ss::future<> start() { co_return; }