Completely revamp how we do object management with the Vulkan backend and

stop using so many static objects so we can tear down and bring up Vulkan
on new devices in the same runtime.
This commit is contained in:
Adam Treat 2023-09-12 13:04:55 -04:00
parent 5b2d8236a7
commit 0412ec287c
7 changed files with 175 additions and 113 deletions

View file

@@ -58,18 +58,6 @@ Algorithm::destroy()
this->mPipeline = nullptr;
}
if (this->mFreePipelineCache && this->mPipelineCache) {
KP_LOG_DEBUG("Kompute Algorithm Destroying pipeline cache");
if (!this->mPipelineCache) {
KP_LOG_WARN("Kompute Algorithm Error requested to destroy "
"pipeline cache but it is null");
}
this->mDevice->destroy(
*this->mPipelineCache,
(vk::Optional<const vk::AllocationCallbacks>)nullptr);
this->mPipelineCache = nullptr;
}
if (this->mFreePipelineLayout && this->mPipelineLayout) {
KP_LOG_DEBUG("Kompute Algorithm Destroying pipeline layout");
if (!this->mPipelineLayout) {
@@ -317,16 +305,6 @@ Algorithm::createPipeline()
"main",
&specializationInfo);
static std::shared_ptr<vk::PipelineCache> globalPipelineCache = std::make_shared<vk::PipelineCache>();
if(!*globalPipelineCache) {
vk::PipelineCacheCreateInfo pipelineCacheInfo =
vk::PipelineCacheCreateInfo();
this->mPipelineCache = globalPipelineCache;
this->mFreePipelineCache = true;
this->mDevice->createPipelineCache(
&pipelineCacheInfo, nullptr, globalPipelineCache.get());
}
vk::ComputePipelineCreateInfo pipelineInfo(vk::PipelineCreateFlags(),
shaderStage,
*this->mPipelineLayout,
@@ -335,7 +313,7 @@ Algorithm::createPipeline()
#ifdef KOMPUTE_CREATE_PIPELINE_RESULT_VALUE
vk::ResultValue<vk::Pipeline> pipelineResult =
this->mDevice->createComputePipeline(*globalPipelineCache, pipelineInfo);
this->mDevice->createComputePipeline(*mPipelineCache, pipelineInfo);
if (pipelineResult.result != vk::Result::eSuccess) {
throw std::runtime_error("Failed to create pipeline result: " +
@@ -347,7 +325,7 @@ Algorithm::createPipeline()
this->mFreePipeline = true;
#else
vk::Pipeline pipeline =
this->mDevice->createComputePipeline(*globalPipelineCache, pipelineInfo)
this->mDevice->createComputePipeline(*mPipelineCache, pipelineInfo)
.value;
this->mPipeline = std::make_shared<vk::Pipeline>(pipeline);
this->mFreePipeline = true;

View file

@@ -88,15 +88,14 @@ Manager::destroy()
this->mManagedSequences.clear();
}
if (this->mManageResources && this->mManagedAlgorithms.size()) {
if (this->mManageResources && !this->mManagedAlgorithmsMap.empty()) {
KP_LOG_DEBUG("Kompute Manager explicitly freeing algorithms");
for (const std::weak_ptr<Algorithm>& weakAlgorithm :
this->mManagedAlgorithms) {
if (std::shared_ptr<Algorithm> algorithm = weakAlgorithm.lock()) {
for (const auto& kv : this->mManagedAlgorithmsMap) {
if (std::shared_ptr<Algorithm> algorithm = kv.second) {
algorithm->destroy();
}
}
this->mManagedAlgorithms.clear();
this->mManagedAlgorithmsMap.clear();
}
if (this->mManageResources && this->mManagedTensors.size()) {
@@ -109,6 +108,18 @@ Manager::destroy()
this->mManagedTensors.clear();
}
if (this->mPipelineCache) {
KP_LOG_DEBUG("Kompute Manager Destroying pipeline cache");
if (!this->mPipelineCache) {
KP_LOG_WARN("Kompute Manager Error requested to destroy "
"pipeline cache but it is null");
}
this->mDevice->destroy(
*this->mPipelineCache,
(vk::Optional<const vk::AllocationCallbacks>)nullptr);
this->mPipelineCache = nullptr;
}
if (this->mFreeDevice) {
KP_LOG_INFO("Destroying device");
this->mDevice->destroy(
@@ -269,12 +280,14 @@ Manager::clear()
end(this->mManagedTensors),
[](std::weak_ptr<Tensor> t) { return t.expired(); }),
end(this->mManagedTensors));
this->mManagedAlgorithms.erase(
std::remove_if(
begin(this->mManagedAlgorithms),
end(this->mManagedAlgorithms),
[](std::weak_ptr<Algorithm> t) { return t.expired(); }),
end(this->mManagedAlgorithms));
for (auto it = this->mManagedAlgorithmsMap.begin();
it != this->mManagedAlgorithmsMap.end();) {
if (it->second) {
it = this->mManagedAlgorithmsMap.erase(it);
} else {
++it;
}
}
this->mManagedSequences.erase(
std::remove_if(begin(this->mManagedSequences),
end(this->mManagedSequences),
@@ -452,6 +465,12 @@ Manager::createDevice(const std::vector<uint32_t>& familyQueueIndices,
}
KP_LOG_DEBUG("Kompute Manager compute queue obtained");
mPipelineCache = std::make_shared<vk::PipelineCache>();
vk::PipelineCacheCreateInfo pipelineCacheInfo =
vk::PipelineCacheCreateInfo();
this->mDevice->createPipelineCache(
&pipelineCacheInfo, nullptr, mPipelineCache.get());
}
std::shared_ptr<Sequence>

View file

@@ -45,6 +45,7 @@ class Algorithm
*/
template<typename S = float, typename P = float>
Algorithm(std::shared_ptr<vk::Device> device,
vk::PipelineCache *pipelineCache,
vk::DescriptorPool *pool,
const std::vector<std::shared_ptr<Tensor>>& tensors = {},
const std::vector<uint32_t>& spirv = {},
@@ -55,6 +56,7 @@
KP_LOG_DEBUG("Kompute Algorithm Constructor with device");
this->mDevice = device;
this->mPipelineCache = pipelineCache;
this->mDescriptorPool = pool;
if (tensors.size() && spirv.size()) {
@@ -310,8 +312,7 @@ class Algorithm
bool mFreeShaderModule = false;
std::shared_ptr<vk::PipelineLayout> mPipelineLayout;
bool mFreePipelineLayout = false;
std::shared_ptr<vk::PipelineCache> mPipelineCache;
bool mFreePipelineCache = false;
vk::PipelineCache *mPipelineCache = nullptr;
std::shared_ptr<vk::Pipeline> mPipeline;
bool mFreePipeline = false;

View file

@@ -39,6 +39,10 @@ class Manager
*/
~Manager();
// Returns true when a Vulkan instance is held (mInstance is a non-null
// shared_ptr); the raw pointer from get() converts implicitly to bool.
bool hasInstance() const {
return this->mInstance.get();
}
// Returns true when a Vulkan logical device is held (mDevice is a non-null
// shared_ptr) — i.e. createDevice has completed for this Manager.
bool hasDevice() const {
return this->mDevice.get();
}
@@ -149,6 +153,7 @@
* @returns Shared pointer with initialised algorithm
*/
std::shared_ptr<Algorithm> algorithm(
const std::string &name,
vk::DescriptorPool *pool,
const std::vector<std::shared_ptr<Tensor>>& tensors = {},
const std::vector<uint32_t>& spirv = {},
@@ -157,7 +162,7 @@
const std::vector<float>& pushConstants = {})
{
return this->algorithm<>(
pool, tensors, spirv, workgroup, specializationConstants, pushConstants);
name, pool, tensors, spirv, workgroup, specializationConstants, pushConstants);
}
/**
@@ -176,6 +181,7 @@
*/
template<typename S = float, typename P = float>
std::shared_ptr<Algorithm> algorithm(
const std::string &name,
vk::DescriptorPool *pool,
const std::vector<std::shared_ptr<Tensor>>& tensors,
const std::vector<uint32_t>& spirv,
@@ -188,6 +194,7 @@
std::shared_ptr<Algorithm> algorithm{ new kp::Algorithm(
this->mDevice,
mPipelineCache.get(),
pool,
tensors,
spirv,
@@ -196,12 +203,24 @@ class Manager
pushConstants) };
if (this->mManageResources) {
this->mManagedAlgorithms.push_back(algorithm);
this->mManagedAlgorithmsMap.insert({name, algorithm});
}
return algorithm;
}
// Returns true when an algorithm was previously registered under `name`
// via Manager::algorithm() (lookup in mManagedAlgorithmsMap).
bool hasAlgorithm(const std::string &name) const {
return mManagedAlgorithmsMap.find(name) != mManagedAlgorithmsMap.end();
}
// Looks up the managed algorithm registered under `name`.
// Returns the shared_ptr stored in mManagedAlgorithmsMap, or nullptr
// when no algorithm with that name has been created.
std::shared_ptr<Algorithm> getAlgorithm(const std::string &name) const {
auto it = mManagedAlgorithmsMap.find(name);
if (it != mManagedAlgorithmsMap.end()) {
return it->second;
}
return nullptr;
}
/**
* Destroy the GPU resources and all managed resources by manager.
**/
@@ -237,6 +256,7 @@ class Manager
// Accessors exposing the Manager-owned Vulkan handles to callers
// (e.g. so external code can share the device and pipeline cache).
std::shared_ptr<vk::Device> device() const { return mDevice; }
std::shared_ptr<vk::PhysicalDevice> physicalDevice() const { return mPhysicalDevice; }
std::shared_ptr<vk::PipelineCache> pipelineCache() const { return mPipelineCache; }
private:
// -------------- OPTIONALLY OWNED RESOURCES
@@ -250,10 +270,11 @@ class Manager
// -------------- ALWAYS OWNED RESOURCES
std::vector<std::weak_ptr<Tensor>> mManagedTensors;
std::vector<std::weak_ptr<Sequence>> mManagedSequences;
std::vector<std::weak_ptr<Algorithm>> mManagedAlgorithms;
std::unordered_map<std::string, std::shared_ptr<Algorithm>> mManagedAlgorithmsMap;
std::vector<uint32_t> mComputeQueueFamilyIndices;
std::vector<std::shared_ptr<vk::Queue>> mComputeQueues;
std::shared_ptr<vk::PipelineCache> mPipelineCache;
bool mManageResources = false;