Minor improvements to template function handling. Preliminary for the work in PR#230 #234

Merged: 1 commit, Apr 30, 2019
2 changes: 1 addition & 1 deletion .gitignore
@@ -1 +1 @@
-CMakeLists.txt.user
+CMakeLists.txt.user*
108 changes: 39 additions & 69 deletions include/mqtt/endpoint.hpp
@@ -2623,7 +2623,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
auto topic_buf = as::buffer(*sp_topic_name);
auto contents_buf = as::buffer(*sp_contents);

-send_publish_keep_lifetime(
+send_publish(
topic_buf,
qos::at_least_once,
retain,
@@ -2663,7 +2663,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
std::vector<v5::property_variant> props = {}
) {

-send_publish_keep_lifetime(
+send_publish(
topic_name,
qos::at_least_once,
retain,
@@ -2707,7 +2707,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
auto topic_buf = as::buffer(*sp_topic_name);
auto contents_buf = as::buffer(*sp_contents);

-send_publish_keep_lifetime(
+send_publish(
topic_buf,
qos::exactly_once,
retain,
@@ -2747,7 +2747,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
std::vector<v5::property_variant> props = {}
) {

-send_publish_keep_lifetime(
+send_publish(
topic_name,
qos::exactly_once,
retain,
@@ -2797,7 +2797,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
auto topic_buf = as::buffer(*sp_topic_name);
auto contents_buf = as::buffer(*sp_contents);

-send_publish_keep_lifetime(
+send_publish(
topic_buf,
qos,
retain,
@@ -2843,7 +2843,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
BOOST_ASSERT(qos == qos::at_most_once || qos == qos::at_least_once || qos == qos::exactly_once);
BOOST_ASSERT((qos == qos::at_most_once && packet_id == 0) || (qos != qos::at_most_once && packet_id != 0));

-send_publish_keep_lifetime(
+send_publish(
topic_name,
qos,
retain,
@@ -2893,7 +2893,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
auto topic_buf = as::buffer(*sp_topic_name);
auto contents_buf = as::buffer(*sp_contents);

-send_publish_keep_lifetime(
+send_publish(
topic_buf,
qos,
retain,
@@ -2939,7 +2939,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
BOOST_ASSERT(qos == qos::at_most_once || qos == qos::at_least_once || qos == qos::exactly_once);
BOOST_ASSERT((qos == qos::at_most_once && packet_id == 0) || (qos != qos::at_most_once && packet_id != 0));

-send_publish_keep_lifetime(
+send_publish(
topic_name,
qos,
retain,
@@ -7705,7 +7705,8 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
acquired_async_subscribe_imp(
packet_id_t packet_id,
std::string topic_name,
-std::uint8_t qos, Args&&... args) {
+std::uint8_t qos,
+Args&&... args) {

std::vector<std::tuple<as::const_buffer, std::uint8_t>> params;
params.reserve((sizeof...(args) + 2) / 2);
@@ -7730,7 +7731,8 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
acquired_async_subscribe_imp(
packet_id_t packet_id,
as::const_buffer topic_name,
-std::uint8_t qos, Args&&... args) {
+std::uint8_t qos,
+Args&&... args) {

std::vector<std::tuple<as::const_buffer, std::uint8_t>> params;
params.reserve((sizeof...(args) + 2) / 2);
@@ -7754,7 +7756,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
>::type
acquired_async_subscribe_imp(
packet_id_t packet_id,
-mqtt::string_view topic_name,
+std::string topic_name,
std::uint8_t qos,
Args&&... args) {

@@ -7906,9 +7908,9 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
>::type
async_suback_imp(
packet_id_t packet_id,
-std::uint8_t qos, Args&&... args) {
-std::vector<std::uint8_t> params;
-async_send_suback(params, packet_id, qos, std::forward<Args>(args)...);
+std::uint8_t qos,
+Args&&... args) {
+async_send_suback(std::vector<std::uint8_t>(), packet_id, qos, std::forward<Args>(args)...);
}

template <typename... Args>
@@ -7920,9 +7922,8 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
>::type
async_suback_imp(
packet_id_t packet_id,
-std::uint8_t qos, Args&&... args) {
-std::vector<std::uint8_t> params;
-async_send_suback(std::move(params), packet_id, qos, std::forward<Args>(args)..., async_handler_t());
+Args&&... qos) {
+async_send_suback(std::vector<std::uint8_t>({qos...}), packet_id, async_handler_t());
}
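The `>::type` lines in these hunks are the tails of `std::enable_if` return types: each `async_*_imp` name has two overloads, and the condition inside the (collapsed) `enable_if` decides which one participates in overload resolution, apparently based on whether the caller supplied a trailing async handler. A rough, self-contained sketch of that dispatch style follows; the handler type, the trait, and the function names are illustrative assumptions, not the library's actual definitions.

```cpp
// Sketch of enable_if-based overload selection in the spirit of the
// async_*_imp pairs above. async_handler_t, last_is_handler, and the exact
// condition are assumptions for illustration only.
#include <cstdint>
#include <iostream>
#include <type_traits>
#include <vector>

using async_handler_t = void (*)(int);  // stand-in for the library's handler type

// Trait: is the last type in the pack convertible to async_handler_t?
template <typename... Args>
struct last_is_handler : std::false_type {};

template <typename T>
struct last_is_handler<T> : std::is_convertible<T, async_handler_t> {};

template <typename T, typename U, typename... Rest>
struct last_is_handler<T, U, Rest...> : last_is_handler<U, Rest...> {};

// Chosen when the caller passes a trailing handler.
template <typename... Args>
typename std::enable_if<last_is_handler<Args...>::value>::type
suback_imp(std::uint16_t packet_id, Args&&... args) {
    std::cout << "handler overload, packet " << packet_id
              << ", " << sizeof...(args) << " arguments\n";
}

// Chosen when every trailing argument is a QoS value (no handler).
template <typename... Args>
typename std::enable_if<!last_is_handler<Args...>::value>::type
suback_imp(std::uint16_t packet_id, Args&&... qos) {
    std::vector<std::uint8_t> params({static_cast<std::uint8_t>(qos)...});
    std::cout << "qos-only overload, packet " << packet_id
              << ", " << params.size() << " qos values\n";
}

int main() {
    suback_imp(1, 0, 1, 2);           // no handler: qos-only overload
    suback_imp(2, 0, 1, [](int) {});  // trailing handler: handler overload
}
```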

template <typename... Args>
@@ -7934,9 +7935,9 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
>::type
async_unsuback_imp(
packet_id_t packet_id,
-std::uint8_t qos, Args&&... args) {
-std::vector<std::uint8_t> params;
-async_send_unsuback(std::move(params), packet_id, qos, std::forward<Args>(args)...);
+std::uint8_t qos,
+Args&&... args) {
+async_send_unsuback(std::vector<std::uint8_t>(), packet_id, qos, std::forward<Args>(args)...);
}

template <typename... Args>
@@ -7948,9 +7949,8 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
>::type
async_unsuback_imp(
packet_id_t packet_id,
-std::uint8_t qos, Args&&... args) {
-std::vector<std::uint8_t> params;
-async_send_unsuback(std::move(params), packet_id, qos, std::forward<Args>(args)..., async_handler_t());
+Args&&... qos) {
+async_send_unsuback(std::vector<std::uint8_t>({qos...}), packet_id, async_handler_t());
Review comment (Contributor Author):
Do parameter pack expansion to create the vector directly. No need for recursion.

}
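The comment above describes the change in the `async_suback_imp`/`async_unsuback_imp` overloads: instead of recursively pushing each QoS value into a `params` vector, the pack is expanded straight into the vector's initializer as `std::vector<std::uint8_t>({qos...})`. A minimal standalone sketch of the two styles, with illustrative names that are not taken from the library:

```cpp
// Contrast of a recursive helper chain with direct parameter-pack expansion
// into the vector, which is what the changed overloads now do.
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Recursive style: peel off one value per call and push it.
inline void collect(std::vector<std::uint8_t>&) {}

template <typename... Args>
void collect(std::vector<std::uint8_t>& out, std::uint8_t head, Args&&... tail) {
    out.push_back(head);
    collect(out, std::forward<Args>(tail)...);
}

// Direct style: expand the whole pack into the initializer list at once.
template <typename... Args>
std::vector<std::uint8_t> collect_direct(Args&&... qos) {
    return std::vector<std::uint8_t>({static_cast<std::uint8_t>(qos)...});
}

int main() {
    std::vector<std::uint8_t> a;
    collect(a, 0, 1, 2);               // recursive build, one call per value
    auto b = collect_direct(0, 1, 2);  // one expression, no recursion
    std::cout << std::boolalpha << (a == b) << std::endl;  // true
}
```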

class send_buffer {
@@ -9415,7 +9415,7 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
}
}

-void send_publish_keep_lifetime(
+void send_publish(
as::const_buffer topic_name,
std::uint8_t qos,
bool retain,
@@ -9491,47 +9491,6 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
}
}

-void send_publish_no_lifetime(
Review comment (Contributor Author):
Sorry to put this in and then take it out, but after considering it more, I don't think that this function needs to have two different versions. The second version essentially only saved a single conditional, after the optimizer finished with it.

-as::const_buffer topic_name,
-std::uint8_t qos,
-bool retain,
-bool dup,
-packet_id_t packet_id,
-std::vector<v5::property_variant> props,
-as::const_buffer payload) {
-
-switch (version_) {
-case protocol_version::v3_1_1:
-do_sync_write(
-v3_1_1::basic_publish_message<PacketIdBytes>(
-topic_name,
-qos,
-retain,
-dup,
-packet_id,
-payload
-)
-);
-break;
-case protocol_version::v5:
-do_sync_write(
-v5::basic_publish_message<PacketIdBytes>(
-topic_name,
-qos,
-retain,
-dup,
-packet_id,
-std::move(props),
-payload
-)
-);
-break;
-default:
-BOOST_ASSERT(false);
-break;
-}
-}

void send_puback(
packet_id_t packet_id,
mqtt::optional<std::uint8_t> reason = mqtt::nullopt,
@@ -9688,8 +9647,9 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
std::vector<std::tuple<as::const_buffer, std::uint8_t>>&& params,
packet_id_t packet_id,
as::const_buffer topic_name,
-std::uint8_t qos, Args&&... args) {
-params.emplace_back(std::move(topic_name), qos);
+std::uint8_t qos,
+Args&&... args) {
+params.emplace_back(topic_name, qos);
send_subscribe(std::move(params), packet_id, std::forward<Args>(args)...);
}
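The `emplace_back` change above drops the `std::move` on the `as::const_buffer` argument, presumably because a Boost.Asio `const_buffer` is only a pointer-plus-size view, so moving it is no cheaper than copying it. A small sketch of that point (assumes Boost 1.66 or later for `const_buffer::size()`):

```cpp
// boost::asio::const_buffer is a trivially copyable view, so std::move on it
// has no effect beyond what a plain copy already does.
#include <boost/asio/buffer.hpp>
#include <iostream>
#include <type_traits>

namespace as = boost::asio;

int main() {
    static_assert(std::is_trivially_copyable<as::const_buffer>::value,
                  "const_buffer is just a pointer and a size");

    const char topic[] = "topic/name";
    as::const_buffer a = as::buffer(topic);  // view over the whole array (includes '\0')
    as::const_buffer b = a;                  // copy: same pointer, same size
    std::cout << b.size() << std::endl;      // 11
}
```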

@@ -9698,7 +9658,8 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
std::vector<std::tuple<as::const_buffer, std::uint8_t>>&& params,
packet_id_t packet_id,
mqtt::string_view topic_name,
-std::uint8_t qos, Args&&... args) {
+std::uint8_t qos,
+Args&&... args) {
params.emplace_back(as::buffer(topic_name.data(), topic_name.size()), qos);
send_subscribe(std::move(params), packet_id, std::forward<Args>(args)...);
}
@@ -9708,6 +9669,14 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
packet_id_t packet_id,
std::vector<v5::property_variant> props = {}
) {
+for(auto const& p : params)
+{
+BOOST_ASSERT(
+subscribe::get_qos(std::get<1>(p)) == qos::at_most_once ||
+subscribe::get_qos(std::get<1>(p)) == qos::at_least_once ||
+subscribe::get_qos(std::get<1>(p)) == qos::exactly_once
+);
+}
switch (version_) {
case protocol_version::v3_1_1:
do_sync_write(v3_1_1::basic_subscribe_message<PacketIdBytes>(std::move(params), packet_id));
@@ -9807,7 +9776,8 @@ class endpoint : public std::enable_shared_from_this<endpoint<Socket, Mutex, Loc
void send_unsuback(
std::vector<std::uint8_t>&& params,
packet_id_t packet_id,
-std::uint8_t reason, Args&&... args) {
+std::uint8_t reason,
+Args&&... args) {
params.push_back(reason);
send_suback(std::move(params), packet_id, std::forward<Args>(args)...);
}