diff --git a/sycl/source/detail/async_alloc.cpp b/sycl/source/detail/async_alloc.cpp
index ac66d72855df4..96861fa8a587c 100644
--- a/sycl/source/detail/async_alloc.cpp
+++ b/sycl/source/detail/async_alloc.cpp
@@ -37,13 +37,13 @@ std::vector> getDepGraphNodes(
     sycl::handler &Handler, detail::queue_impl *Queue,
     const std::shared_ptr &Graph,
     const std::vector> &DepEvents) {
-  auto HandlerImpl = detail::getSyclObjImpl(Handler);
+  detail::handler_impl &HandlerImpl = *detail::getSyclObjImpl(Handler);
   // Get dependent graph nodes from any events
   auto DepNodes = Graph->getNodesForEvents(DepEvents);
   // If this node was added explicitly we may have node deps in the handler as
   // well, so add them to the list
-  DepNodes.insert(DepNodes.end(), HandlerImpl->MNodeDeps.begin(),
-                  HandlerImpl->MNodeDeps.end());
+  DepNodes.insert(DepNodes.end(), HandlerImpl.MNodeDeps.begin(),
+                  HandlerImpl.MNodeDeps.end());
   // If this is being recorded from an in-order queue we need to get the last
   // in-order node if any, since this will later become a dependency of the
   // node being processed here.
@@ -119,7 +119,7 @@ __SYCL_EXPORT void *async_malloc_from_pool(sycl::handler &h, size_t size,
                                            const memory_pool &pool) {
 
   auto &Adapter = h.getContextImpl().getAdapter();
-  auto &memPoolImpl = sycl::detail::getSyclObjImpl(pool);
+  detail::memory_pool_impl &memPoolImpl = *detail::getSyclObjImpl(pool);
 
   // Get CG event dependencies for this allocation.
   const auto &DepEvents = h.impl->CGData.MEvents;
@@ -135,12 +135,12 @@ __SYCL_EXPORT void *async_malloc_from_pool(sycl::handler &h, size_t size,
 
     // Memory pool is passed as the graph may use some properties of it.
     alloc = Graph->getMemPool().malloc(size, pool.get_alloc_kind(), DepNodes,
-                                       sycl::detail::getSyclObjImpl(pool));
+                                       detail::getSyclObjImpl(pool).get());
   } else {
     ur_queue_handle_t Q = h.impl->get_queue().getHandleRef();
     Adapter->call(
-        Q, memPoolImpl.get()->get_handle(), size, nullptr, UREvents.size(),
+        Q, memPoolImpl.get_handle(), size, nullptr, UREvents.size(),
         UREvents.data(), &alloc, &Event);
   }
 
   // Async malloc must return a void* immediately.
diff --git a/sycl/source/detail/graph/memory_pool.cpp b/sycl/source/detail/graph/memory_pool.cpp
index 63335ccfe0f46..fdbf90df56bee 100644
--- a/sycl/source/detail/graph/memory_pool.cpp
+++ b/sycl/source/detail/graph/memory_pool.cpp
@@ -22,7 +22,7 @@ namespace detail {
 void *
 graph_mem_pool::malloc(size_t Size, usm::alloc AllocType,
                        const std::vector> &DepNodes,
-                       const std::shared_ptr &MemPool) {
+                       memory_pool_impl *MemPool) {
   // We are potentially modifying contents of this memory pool and the owning
   // graph, so take a lock here.
   graph_impl::WriteLock Lock(MGraph.MMutex);
@@ -41,8 +41,8 @@ graph_mem_pool::malloc(size_t Size, usm::alloc AllocType,
 
   switch (AllocType) {
   case usm::alloc::device: {
-    auto &CtxImpl = sycl::detail::getSyclObjImpl(MContext);
-    auto &Adapter = CtxImpl->getAdapter();
+    context_impl &CtxImpl = *getSyclObjImpl(MContext);
+    auto &Adapter = CtxImpl.getAdapter();
     size_t Granularity = get_mem_granularity(MDevice, MContext);
     uintptr_t StartPtr = 0;
 
@@ -60,8 +60,8 @@ graph_mem_pool::malloc(size_t Size, usm::alloc AllocType,
 
     // If no allocation could be reused, do a new virtual reservation
     Adapter->call(
-        CtxImpl->getHandleRef(), reinterpret_cast(StartPtr),
-        AlignedSize, &Alloc);
+        CtxImpl.getHandleRef(), reinterpret_cast(StartPtr), AlignedSize,
+        &Alloc);
 
     AllocInfo.Size = AlignedSize;
     AllocInfo.Ptr = Alloc;
diff --git a/sycl/source/detail/graph/memory_pool.hpp b/sycl/source/detail/graph/memory_pool.hpp
index 0de88a5cc62af..aa4a2d1fb0115 100644
--- a/sycl/source/detail/graph/memory_pool.hpp
+++ b/sycl/source/detail/graph/memory_pool.hpp
@@ -84,7 +84,7 @@ class graph_mem_pool {
   /// @return A pointer to the start of the allocation
   void *malloc(size_t Size, usm::alloc AllocType,
                const std::vector> &DepNodes,
-               const std::shared_ptr &MemPool = nullptr);
+               memory_pool_impl *MemPool = nullptr);
 
   /// Return the total amount of memory being used by this pool
   size_t getMemUseCurrent() const {