public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] repo/proj/guru:dev commit in: dev-cpp/cpprestsdk/files/, dev-cpp/cpprestsdk/
@ 2025-05-24  9:06 Jan-Espen Oversand
  0 siblings, 0 replies; only message in thread
From: Jan-Espen Oversand @ 2025-05-24  9:06 UTC (permalink / raw)
  To: gentoo-commits

commit:     8767d47e9c10cbaf127b6a7201c06be0ad787330
Author:     Jan-Espen Oversand <sigsegv <AT> radiotube <DOT> org>
AuthorDate: Sat May 24 09:06:07 2025 +0000
Commit:     Jan-Espen Oversand <sigsegv <AT> radiotube <DOT> org>
CommitDate: Sat May 24 09:06:07 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=8767d47e

dev-cpp/cpprestsdk: boost 1.87 compat patch

Signed-off-by: Jan-Espen Oversand <sigsegv <AT> radiotube.org>

 dev-cpp/cpprestsdk/cpprestsdk-2.10.19-r4.ebuild    |  38 +++
 .../cpprestsdk-2.10.19-boost-1.87-compat.patch     | 359 +++++++++++++++++++++
 2 files changed, 397 insertions(+)

diff --git a/dev-cpp/cpprestsdk/cpprestsdk-2.10.19-r4.ebuild b/dev-cpp/cpprestsdk/cpprestsdk-2.10.19-r4.ebuild
new file mode 100644
index 0000000000..3b30f85340
--- /dev/null
+++ b/dev-cpp/cpprestsdk/cpprestsdk-2.10.19-r4.ebuild
@@ -0,0 +1,38 @@
+# Copyright 1999-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+inherit cmake
+
+DESCRIPTION="A Microsoft project for cloud-based client-server communication."
+HOMEPAGE="https://github.com/microsoft/cpprestsdk"
+SRC_URI="https://github.com/microsoft/cpprestsdk/archive/refs/tags/v${PV}.tar.gz -> ${P}.tar.gz"
+LICENSE="MIT"
+SLOT="0"
+KEYWORDS="~amd64"
+
+RDEPEND=">=dev-libs/openssl-1.1.1q
+		>=dev-libs/boost-1.80.0-r1
+		>=sys-libs/zlib-1.2.13-r1"
+
+DEPEND="${RDEPEND}"
+
+BDEPEND="
+		app-alternatives/ninja
+		>=sys-devel/gcc-11.3.0
+		>=virtual/pkgconfig-2-r1
+		>=dev-cpp/websocketpp-0.8.2
+"
+
+PATCHES=(
+	"${FILESDIR}"/cpprestsdk-${PV}-warnings.patch
+	"${FILESDIR}"/cpprestsdk-${PV}-disabl-int-tests.patch
+	"${FILESDIR}"/cpprestsdk-${PV}-disable-werror-default.patch
+	"${FILESDIR}"/cpprestsdk-${PV}-boost-1.87-compat.patch
+)
+
+src_configure() {
+	local mycmakeargs=( -DCMAKE_BUILD_TYPE=Release )
+	cmake_src_configure
+}

diff --git a/dev-cpp/cpprestsdk/files/cpprestsdk-2.10.19-boost-1.87-compat.patch b/dev-cpp/cpprestsdk/files/cpprestsdk-2.10.19-boost-1.87-compat.patch
new file mode 100644
index 0000000000..e0e8a75d77
--- /dev/null
+++ b/dev-cpp/cpprestsdk/files/cpprestsdk-2.10.19-boost-1.87-compat.patch
@@ -0,0 +1,359 @@
+--- a/Release/src/http/listener/http_server_asio.cpp
++++ b/Release/src/http/listener/http_server_asio.cpp
+@@ -520,17 +520,14 @@
+     auto& service = crossplat::threadpool::shared_instance().service();
+     tcp::resolver resolver(service);
+     // #446: boost resolver does not recognize "+" as a host wildchar
+-    tcp::resolver::query query =
+-        ("+" == m_host) ? tcp::resolver::query(m_port, boost::asio::ip::resolver_query_base::flags())
+-                        : tcp::resolver::query(m_host, m_port, boost::asio::ip::resolver_query_base::flags());
+ 
+-    tcp::endpoint endpoint = *resolver.resolve(query);
++    tcp::endpoint endpoint = (("+" == m_host) ? *(resolver.resolve("", m_port).begin()) : *(resolver.resolve(m_host, m_port).begin()));
+ 
+     m_acceptor.reset(new tcp::acceptor(service));
+     m_acceptor->open(endpoint.protocol());
+     m_acceptor->set_option(socket_base::reuse_address(true));
+     m_acceptor->bind(endpoint);
+-    m_acceptor->listen(0 != m_backlog ? m_backlog : socket_base::max_connections);
++    m_acceptor->listen(0 != m_backlog ? m_backlog : socket_base::max_listen_connections);
+ 
+     auto socket = new ip::tcp::socket(service);
+     std::unique_ptr<ip::tcp::socket> usocket(socket);
+@@ -881,7 +878,7 @@
+     else
+     {
+         auto writebuf = requestImpl->outstream().streambuf();
+-        writebuf.putn_nocopy(buffer_cast<const uint8_t*>(m_request_buf.data()), toWrite)
++        writebuf.putn_nocopy(static_cast<const uint8_t*>(m_request_buf.data().data()), toWrite)
+             .then([=](pplx::task<size_t> writeChunkTask) -> will_deref_t {
+                 try
+                 {
+@@ -913,7 +910,7 @@
+     {
+         auto writebuf = requestImpl->outstream().streambuf();
+         writebuf
+-            .putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_request_buf.data()),
++            .putn_nocopy(static_cast<const uint8_t*>(m_request_buf.data().data()),
+                          (std::min)(m_request_buf.size(), m_read_size - m_read))
+             .then([this](pplx::task<size_t> writtenSizeTask) -> will_deref_t {
+                 size_t writtenSize = 0;
+@@ -1134,7 +1131,7 @@
+     }
+     auto membuf = m_response_buf.prepare(ChunkSize + chunked_encoding::additional_encoding_space);
+ 
+-    readbuf.getn(buffer_cast<uint8_t*>(membuf) + chunked_encoding::data_offset, ChunkSize)
++    readbuf.getn(static_cast<uint8_t*>(membuf.data()) + chunked_encoding::data_offset, ChunkSize)
+         .then([=](pplx::task<size_t> actualSizeTask) -> will_deref_and_erase_t {
+             size_t actualSize = 0;
+             try
+@@ -1146,7 +1143,7 @@
+                 return cancel_sending_response_with_error(response, std::current_exception());
+             }
+             size_t offset = chunked_encoding::add_chunked_delimiters(
+-                buffer_cast<uint8_t*>(membuf), ChunkSize + chunked_encoding::additional_encoding_space, actualSize);
++                static_cast<uint8_t*>(membuf.data()), ChunkSize + chunked_encoding::additional_encoding_space, actualSize);
+             m_response_buf.commit(actualSize + chunked_encoding::additional_encoding_space);
+             m_response_buf.consume(offset);
+             if (actualSize == 0)
+@@ -1167,7 +1164,7 @@
+         return cancel_sending_response_with_error(
+             response, std::make_exception_ptr(http_exception("Response stream close early!")));
+     size_t readBytes = (std::min)(ChunkSize, m_write_size - m_write);
+-    readbuf.getn(buffer_cast<uint8_t*>(m_response_buf.prepare(readBytes)), readBytes)
++    readbuf.getn(static_cast<uint8_t*>(m_response_buf.prepare(readBytes).data()), readBytes)
+         .then([=](pplx::task<size_t> actualSizeTask) -> will_deref_and_erase_t {
+             size_t actualSize = 0;
+             try
+--- a/Release/src/http/client/http_client_asio.cpp
++++ b/Release/src/http/client/http_client_asio.cpp
+@@ -146,7 +146,7 @@
+     friend class asio_client;
+ 
+ public:
+-    asio_connection(boost::asio::io_service& io_service)
++    asio_connection(boost::asio::io_context& io_service)
+         : m_socket_lock()
+         , m_socket(io_service)
+         , m_ssl_stream()
+@@ -581,10 +581,8 @@
+ 
+             m_context->m_timer.start();
+ 
+-            tcp::resolver::query query(utility::conversions::to_utf8string(proxy_host), to_string(proxy_port));
+-
+             auto client = std::static_pointer_cast<asio_client>(m_context->m_http_client);
+-            m_context->m_resolver.async_resolve(query,
++            m_context->m_resolver.async_resolve(utility::conversions::to_utf8string(proxy_host), to_string(proxy_port),
+                                                 boost::bind(&ssl_proxy_tunnel::handle_resolve,
+                                                             shared_from_this(),
+                                                             boost::asio::placeholders::error,
+@@ -592,8 +590,9 @@
+         }
+ 
+     private:
+-        void handle_resolve(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++        void handle_resolve(const boost::system::error_code& ec, tcp::resolver::results_type results)
+         {
++            auto iterator = results.begin();
+             if (ec)
+             {
+                 m_context->report_error("Error resolving proxy address", ec, httpclient_errorcode_context::connect);
+@@ -601,16 +600,16 @@
+             else
+             {
+                 m_context->m_timer.reset();
+-                auto endpoint = *endpoints;
++                auto endpoint = *iterator;
+                 m_context->m_connection->async_connect(endpoint,
+                                                        boost::bind(&ssl_proxy_tunnel::handle_tcp_connect,
+                                                                    shared_from_this(),
+                                                                    boost::asio::placeholders::error,
+-                                                                   ++endpoints));
++                                                                   ++iterator, results.end()));
+             }
+         }
+ 
+-        void handle_tcp_connect(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++        void handle_tcp_connect(const boost::system::error_code& ec, tcp::resolver::results_type::iterator endpoints, tcp::resolver::results_type::iterator endpoints_end)
+         {
+             if (!ec)
+             {
+@@ -621,7 +620,7 @@
+                                                                  shared_from_this(),
+                                                                  boost::asio::placeholders::error));
+             }
+-            else if (endpoints == tcp::resolver::iterator())
++            else if (endpoints == endpoints_end)
+             {
+                 m_context->report_error(
+                     "Failed to connect to any resolved proxy endpoint", ec, httpclient_errorcode_context::connect);
+@@ -646,7 +645,7 @@
+                                                        boost::bind(&ssl_proxy_tunnel::handle_tcp_connect,
+                                                                    shared_from_this(),
+                                                                    boost::asio::placeholders::error,
+-                                                                   ++endpoints));
++                                                                   ++endpoints, endpoints_end));
+             }
+         }
+ 
+@@ -885,8 +884,7 @@
+                 auto tcp_host = proxy_type == http_proxy_type::http ? proxy_host : host;
+                 auto tcp_port = proxy_type == http_proxy_type::http ? proxy_port : port;
+ 
+-                tcp::resolver::query query(tcp_host, to_string(tcp_port));
+-                ctx->m_resolver.async_resolve(query,
++                ctx->m_resolver.async_resolve(tcp_host, to_string(tcp_port),
+                                               boost::bind(&asio_context::handle_resolve,
+                                                           ctx,
+                                                           boost::asio::placeholders::error,
+@@ -1006,7 +1004,7 @@
+         request_context::report_error(errorcodeValue, message);
+     }
+ 
+-    void handle_connect(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++    void handle_connect(const boost::system::error_code& ec, tcp::resolver::results_type::iterator endpoints, tcp::resolver::results_type::iterator endpoints_end)
+     {
+         m_timer.reset();
+         if (!ec)
+@@ -1019,7 +1017,7 @@
+         {
+             report_error("Request canceled by user.", ec, httpclient_errorcode_context::connect);
+         }
+-        else if (endpoints == tcp::resolver::iterator())
++        else if (endpoints == endpoints_end)
+         {
+             report_error("Failed to connect to any resolved endpoint", ec, httpclient_errorcode_context::connect);
+         }
+@@ -1041,28 +1039,29 @@
+             m_connection->async_connect(
+                 endpoint,
+                 boost::bind(
+-                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints));
++                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints, endpoints_end));
+         }
+     }
+ 
+-    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
++    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::results_type results)
+     {
+         if (ec)
+         {
+             report_error("Error resolving address", ec, httpclient_errorcode_context::connect);
+         }
+-        else if (endpoints == tcp::resolver::iterator())
++        else if (results.empty())
+         {
+             report_error("Failed to resolve address", ec, httpclient_errorcode_context::connect);
+         }
+         else
+         {
+             m_timer.reset();
+-            auto endpoint = *endpoints;
++            auto iterator = results.begin();
++            auto endpoint = *iterator;
+             m_connection->async_connect(
+                 endpoint,
+                 boost::bind(
+-                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints));
++                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++iterator, results.end()));
+         }
+     }
+ 
+@@ -1134,7 +1133,7 @@
+         }
+ #endif // CPPREST_PLATFORM_ASIO_CERT_VERIFICATION_AVAILABLE
+ 
+-        boost::asio::ssl::rfc2818_verification rfc2818(m_connection->cn_hostname());
++        boost::asio::ssl::host_name_verification rfc2818(m_connection->cn_hostname());
+         return rfc2818(preverified, verifyCtx);
+     }
+ 
+@@ -1182,8 +1181,8 @@
+ 
+         const auto& chunkSize = m_http_client->client_config().chunksize();
+         auto readbuf = _get_readbuffer();
+-        uint8_t* buf = boost::asio::buffer_cast<uint8_t*>(
+-            m_body_buf.prepare(chunkSize + http::details::chunked_encoding::additional_encoding_space));
++        auto bodyBuf = m_body_buf.prepare(chunkSize + http::details::chunked_encoding::additional_encoding_space);
++        uint8_t* buf = static_cast<uint8_t *>(bodyBuf.data());
+         const auto this_request = shared_from_this();
+         readbuf.getn(buf + http::details::chunked_encoding::data_offset, chunkSize)
+             .then([this_request, buf, chunkSize AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+@@ -1247,7 +1246,7 @@
+         const auto readSize = static_cast<size_t>((std::min)(
+             static_cast<uint64_t>(m_http_client->client_config().chunksize()), m_content_length - m_uploaded));
+         auto readbuf = _get_readbuffer();
+-        readbuf.getn(boost::asio::buffer_cast<uint8_t*>(m_body_buf.prepare(readSize)), readSize)
++        readbuf.getn(static_cast<uint8_t*>(m_body_buf.prepare(readSize).data()), readSize)
+             .then([this_request AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+                 try
+                 {
+@@ -1639,7 +1638,7 @@
+                     std::vector<uint8_t> decompressed;
+ 
+                     bool boo =
+-                        decompress(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), to_read, decompressed);
++                        decompress(static_cast<const uint8_t*>(m_body_buf.data().data()), to_read, decompressed);
+                     if (!boo)
+                     {
+                         report_exception(std::runtime_error("Failed to decompress the response body"));
+@@ -1687,7 +1686,7 @@
+                 }
+                 else
+                 {
+-                    writeBuffer.putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), to_read)
++                    writeBuffer.putn_nocopy(static_cast<const uint8_t*>(m_body_buf.data().data()), to_read)
+                         .then([this_request, to_read AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+                             try
+                             {
+@@ -1759,7 +1758,7 @@
+                 std::vector<uint8_t> decompressed;
+ 
+                 bool boo =
+-                    decompress(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), read_size, decompressed);
++                    decompress(static_cast<const uint8_t*>(m_body_buf.data().data()), read_size, decompressed);
+                 if (!boo)
+                 {
+                     this_request->report_exception(std::runtime_error("Failed to decompress the response body"));
+@@ -1821,7 +1820,7 @@
+             }
+             else
+             {
+-                writeBuffer.putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), read_size)
++                writeBuffer.putn_nocopy(static_cast<const uint8_t*>(m_body_buf.data().data()), read_size)
+                     .then([this_request AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
+                         size_t writtenSize = 0;
+                         try
+@@ -1870,7 +1869,7 @@
+             assert(!m_ctx.expired());
+             m_state = started;
+ 
+-            m_timer.expires_from_now(m_duration);
++            m_timer.expires_after(m_duration);
+             auto ctx = m_ctx;
+             m_timer.async_wait([ctx AND_CAPTURE_MEMBER_FUNCTION_POINTERS](const boost::system::error_code& ec) {
+                 handle_timeout(ec, ctx);
+@@ -1881,7 +1880,7 @@
+         {
+             assert(m_state == started || m_state == timedout);
+             assert(!m_ctx.expired());
+-            if (m_timer.expires_from_now(m_duration) > 0)
++            if (m_timer.expires_after(m_duration) > 0)
+             {
+                 // The existing handler was canceled so schedule a new one.
+                 assert(m_state == started);
+--- a/Release/src/pplx/threadpool.cpp
++++ b/Release/src/pplx/threadpool.cpp
+@@ -37,7 +37,7 @@
+ 
+ struct threadpool_impl final : crossplat::threadpool
+ {
+-    threadpool_impl(size_t n) : crossplat::threadpool(n), m_work(m_service)
++    threadpool_impl(size_t n) : crossplat::threadpool(n), m_work(boost::asio::make_work_guard(m_service))
+     {
+         for (size_t i = 0; i < n; i++)
+             add_thread();
+@@ -84,7 +84,7 @@
+     }
+ 
+     std::vector<std::unique_ptr<boost::asio::detail::thread>> m_threads;
+-    boost::asio::io_service::work m_work;
++    boost::asio::executor_work_guard<boost::asio::io_context::executor_type > m_work;
+ };
+ 
+ #if defined(_WIN32)
+--- a/Release/src/websockets/client/ws_client_wspp.cpp
++++ b/Release/src/websockets/client/ws_client_wspp.cpp
+@@ -225,7 +225,7 @@
+                             verifyCtx, utility::conversions::to_utf8string(m_uri.host()));
+                     }
+ #endif
+-                    boost::asio::ssl::rfc2818_verification rfc2818(utility::conversions::to_utf8string(m_uri.host()));
++                    boost::asio::ssl::host_name_verification rfc2818(utility::conversions::to_utf8string(m_uri.host()));
+                     return rfc2818(preverified, verifyCtx);
+                 });
+ 
+--- a/Release/src/pplx/pplxlinux.cpp
++++ b/Release/src/pplx/pplxlinux.cpp
+@@ -35,7 +35,7 @@
+ 
+ _PPLXIMP void linux_scheduler::schedule(TaskProc_t proc, void* param)
+ {
+-    crossplat::threadpool::shared_instance().service().post(boost::bind(proc, param));
++    boost::asio::post(crossplat::threadpool::shared_instance().service(), boost::bind(proc, param));
+ }
+ 
+ } // namespace details
+--- a/Release/include/pplx/threadpool.h
++++ b/Release/include/pplx/threadpool.h
+@@ -69,15 +69,15 @@
+     CASABLANCA_DEPRECATED("Use `.service().post(task)` directly.")
+     void schedule(T task)
+     {
+-        service().post(task);
++        boost::asio::post(service(), task);
+     }
+ 
+-    boost::asio::io_service& service() { return m_service; }
++    boost::asio::io_context& service() { return m_service; }
+ 
+ protected:
+     threadpool(size_t num_threads) : m_service(static_cast<int>(num_threads)) {}
+ 
+-    boost::asio::io_service m_service;
++    boost::asio::io_context m_service;
+ };
+ 
+ } // namespace crossplat
+--- a/Release/tests/functional/pplx/pplx_test/pplx_op_test.cpp
++++ b/Release/tests/functional/pplx/pplx_test/pplx_op_test.cpp
+@@ -57,7 +57,7 @@
+     virtual void schedule(pplx::TaskProc_t proc, void* param)
+     {
+         pplx::details::atomic_increment(s_flag);
+-        m_pool->service().post([=]() -> void { proc(param); });
++        boost::asio::post(m_pool->service(), [=]() -> void { proc(param); });
+     }
+ 
+ public:


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2025-05-24  9:06 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-05-24  9:06 [gentoo-commits] repo/proj/guru:dev commit in: dev-cpp/cpprestsdk/files/, dev-cpp/cpprestsdk/ Jan-Espen Oversand

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox