Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package vulkan-validationlayers for
openSUSE:Factory checked in at 2022-08-29 15:16:31
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/vulkan-validationlayers (Old)
and /work/SRC/openSUSE:Factory/.vulkan-validationlayers.new.2083 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "vulkan-validationlayers"
Mon Aug 29 15:16:31 2022 rev:37 rq:999972 version:1.3.224.1
Changes:
--------
--- /work/SRC/openSUSE:Factory/vulkan-validationlayers/vulkan-validationlayers.changes   2022-08-24 15:11:08.936503699 +0200
+++ /work/SRC/openSUSE:Factory/.vulkan-validationlayers.new.2083/vulkan-validationlayers.changes   2022-08-29 15:16:32.445986661 +0200
@@ -1,0 +2,6 @@
+Fri Aug 26 19:41:43 UTC 2022 - Jan Engelhardt <[email protected]>
+
+- Update to release SDK-1.3.224.1
+ * layers: Fix VK_REMAINING_* on Z-Cull tracking
+
+-------------------------------------------------------------------
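The "VK_REMAINING_*" entry refers to the VK_REMAINING_ARRAY_LAYERS and VK_REMAINING_MIP_LEVELS sentinels an application may pass in a VkImageSubresourceRange. Before this release the NVIDIA Z-cull best-practices tracking iterated over those raw sentinel values; the fix (the new ForEachSubresource helper in the diff below) resolves them against the image's actual layer and mip counts first. A minimal standalone sketch of that resolution, using a plain struct in place of the layer's IMAGE_STATE tracking (ImageExtentSketch and ForEachSubresourceSketch are illustrative names, not the layer's API):

  #include <cstdint>
  #include <vulkan/vulkan_core.h>

  // Illustrative stand-in for the per-image extent data the layer tracks.
  struct ImageExtentSketch {
      uint32_t total_levels;
      uint32_t total_layers;
  };

  // Resolve the VK_REMAINING_* sentinels against the image's real extent before
  // walking subresources (per the Vulkan spec they mean "from the base to the end").
  template <typename Func>
  static void ForEachSubresourceSketch(const ImageExtentSketch& image, const VkImageSubresourceRange& range, Func&& func) {
      const uint32_t layer_count = (range.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                       ? image.total_layers - range.baseArrayLayer : range.layerCount;
      const uint32_t level_count = (range.levelCount == VK_REMAINING_MIP_LEVELS)
                                       ? image.total_levels - range.baseMipLevel : range.levelCount;
      for (uint32_t i = 0; i < layer_count; ++i) {
          for (uint32_t j = 0; j < level_count; ++j) {
              func(range.baseArrayLayer + i, range.baseMipLevel + j);
          }
      }
  }

With a range of {aspect, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS} on a 10-mip, 6-layer image this visits 60 subresources, whereas the old per-layer/per-level loops below effectively iterated on the ~4 billion sentinel value itself.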
Old:
----
sdk-1.3.224.0.tar.gz
New:
----
sdk-1.3.224.1.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ vulkan-validationlayers.spec ++++++
--- /var/tmp/diff_new_pack.D7LPwU/_old 2022-08-29 15:16:33.161988102 +0200
+++ /var/tmp/diff_new_pack.D7LPwU/_new 2022-08-29 15:16:33.165988111 +0200
@@ -17,9 +17,9 @@
Name: vulkan-validationlayers
-Version: 1.3.224.0
+Version: 1.3.224.1
Release: 0
-%define lname libVkLayer_utils-1_3_224_0
+%define lname libVkLayer_utils-1_3_224_1
Summary: Validation layers for Vulkan
License: Apache-2.0
Group: Development/Tools/Other
++++++ sdk-1.3.224.0.tar.gz -> sdk-1.3.224.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/Vulkan-ValidationLayers-sdk-1.3.224.0/layers/best_practices_utils.cpp new/Vulkan-ValidationLayers-sdk-1.3.224.1/layers/best_practices_utils.cpp
--- old/Vulkan-ValidationLayers-sdk-1.3.224.0/layers/best_practices_utils.cpp   2022-08-19 18:26:32.000000000 +0200
+++ new/Vulkan-ValidationLayers-sdk-1.3.224.1/layers/best_practices_utils.cpp   2022-08-25 19:28:34.000000000 +0200
@@ -1485,7 +1485,7 @@
StateTracker::PreCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo);
auto cb = GetWrite<bp_state::CommandBuffer>(commandBuffer);
- if (cb) return;
+ if (!cb) return;
cb->num_submits = 0;
cb->is_one_time_submit = (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) != 0;
@@ -2053,6 +2053,21 @@
RecordResetZcullDirection(cmd_state, scope.image, scope.range);
}
+template <typename Func>
+static void ForEachSubresource(const IMAGE_STATE& image, const VkImageSubresourceRange& range, Func&& func)
+{
+    const uint32_t layerCount = (range.layerCount == VK_REMAINING_ARRAY_LAYERS) ? image.full_range.layerCount : range.layerCount;
+    const uint32_t levelCount = (range.levelCount == VK_REMAINING_MIP_LEVELS) ? image.full_range.levelCount : range.levelCount;
+
+    for (uint32_t i = 0; i < layerCount; ++i) {
+        const uint32_t layer = range.baseArrayLayer + i;
+        for (uint32_t j = 0; j < levelCount; ++j) {
+            const uint32_t level = range.baseMipLevel + j;
+            func(layer, level);
+        }
+    }
+}
+
void BestPractices::RecordResetZcullDirection(bp_state::CommandBuffer& cmd_state, VkImage depth_image,
                                              const VkImageSubresourceRange& subresource_range) {
assert(VendorCheckEnabled(kBPVendorNVIDIA));
@@ -2065,17 +2080,14 @@
}
auto& tree = image_it->second;
- for (uint32_t i = 0; i < subresource_range.layerCount; ++i) {
- const uint32_t layer = subresource_range.baseArrayLayer + i;
-
- for (uint32_t j = 0; j < subresource_range.levelCount; ++j) {
- const uint32_t level = subresource_range.baseMipLevel + j;
+ auto image = Get<IMAGE_STATE>(depth_image);
+ if (!image) return;
- auto& subresource = tree.GetState(layer, level);
- subresource.num_less_draws = 0;
- subresource.num_greater_draws = 0;
- }
- }
+    ForEachSubresource(*image, subresource_range, [&tree](uint32_t layer, uint32_t level) {
+ auto& subresource = tree.GetState(layer, level);
+ subresource.num_less_draws = 0;
+ subresource.num_greater_draws = 0;
+ });
}
void BestPractices::RecordSetScopeZcullDirection(bp_state::CommandBuffer& cmd_state,
                                                 bp_state::CommandBufferStateNV::ZcullDirection mode) {
@@ -2096,14 +2108,12 @@
}
auto& tree = image_it->second;
- for (uint32_t i = 0; i < subresource_range.layerCount; ++i) {
- const uint32_t layer = subresource_range.baseArrayLayer + i;
+ auto image = Get<IMAGE_STATE>(depth_image);
+ if (!image) return;
- for (uint32_t j = 0; j < subresource_range.levelCount; ++j) {
- const uint32_t level = subresource_range.baseMipLevel + j;
-            tree.GetState(layer, level).direction = cmd_state.nv.zcull_direction;
- }
- }
+    ForEachSubresource(*image, subresource_range, [&tree, &cmd_state](uint32_t layer, uint32_t level) {
+ tree.GetState(layer, level).direction = cmd_state.nv.zcull_direction;
+ });
}
void BestPractices::RecordZcullDraw(bp_state::CommandBuffer& cmd_state) {
@@ -2112,9 +2122,11 @@
    // Add one draw to each subresource depending on the current Z-cull direction
auto& scope = cmd_state.nv.zcull_scope;
- for (uint32_t i = 0; i < scope.range.layerCount; ++i) {
- const uint32_t layer = scope.range.baseArrayLayer + i;
-        auto& subresource = scope.tree->GetState(layer, scope.range.baseMipLevel);
+ auto image = Get<IMAGE_STATE>(scope.image);
+ if (!image) return;
+
+    ForEachSubresource(*image, scope.range, [&scope](uint32_t layer, uint32_t level) {
+ auto& subresource = scope.tree->GetState(layer, level);
switch (subresource.direction) {
case bp_state::CommandBufferStateNV::ZcullDirection::Unknown:
@@ -2128,7 +2140,7 @@
++subresource.num_greater_draws;
break;
}
- }
+ });
}
bool BestPractices::ValidateZcullScope(const bp_state::CommandBuffer& cmd_state) const {
@@ -2150,6 +2162,7 @@
const char* good_mode = nullptr;
const char* bad_mode = nullptr;
+ bool is_balanced = false;
const auto image_it = cmd_state.nv.zcull_per_image.find(image);
if (image_it == cmd_state.nv.zcull_per_image.end()) {
@@ -2157,39 +2170,36 @@
}
const auto& tree = image_it->second;
- bool is_balanced = false;
+ auto image_state = Get<IMAGE_STATE>(image);
+ if (!image_state) {
+ return skip;
+ }
- for (uint32_t i = 0; i < subresource_range.layerCount; ++i) {
- const uint32_t layer = subresource_range.baseArrayLayer + i;
+    ForEachSubresource(*image_state, subresource_range, [&](uint32_t layer, uint32_t level) {
+ if (is_balanced) {
+ return;
+ }
+ const auto& resource = tree.GetState(layer, level);
+        const uint64_t num_draws = resource.num_less_draws + resource.num_greater_draws;
- for (uint32_t j = 0; j < subresource_range.levelCount; ++j) {
- const uint32_t level = subresource_range.baseMipLevel + j;
+ if (num_draws == 0) {
+ return;
+ }
+        const uint64_t less_ratio = (resource.num_less_draws * 100) / num_draws;
+        const uint64_t greater_ratio = (resource.num_greater_draws * 100) / num_draws;
- const auto& resource = tree.GetState(layer, level);
-            const uint64_t num_draws = resource.num_less_draws + resource.num_greater_draws;
+        if ((less_ratio > kZcullDirectionBalanceRatioNVIDIA) && (greater_ratio > kZcullDirectionBalanceRatioNVIDIA)) {
+ is_balanced = true;
- if (num_draws > 0) {
-                const uint64_t less_ratio = (resource.num_less_draws * 100) / num_draws;
-                const uint64_t greater_ratio = (resource.num_greater_draws * 100) / num_draws;
-
-                if ((less_ratio > kZcullDirectionBalanceRatioNVIDIA) && (greater_ratio > kZcullDirectionBalanceRatioNVIDIA)) {
- is_balanced = true;
-
- if (greater_ratio > less_ratio) {
- good_mode = "GREATER";
- bad_mode = "LESS";
- } else {
- good_mode = "LESS";
- bad_mode = "GREATER";
- }
- break;
- }
+ if (greater_ratio > less_ratio) {
+ good_mode = "GREATER";
+ bad_mode = "LESS";
+ } else {
+ good_mode = "LESS";
+ bad_mode = "GREATER";
}
}
- if (is_balanced) {
- break;
- }
- }
+ });
if (is_balanced) {
skip |= LogPerformanceWarning(
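One detail worth noting in the ValidateZcullScope rewrite above: the per-subresource loop is now a lambda handed to ForEachSubresource, so the old break statements are gone; instead the code latches is_balanced and lets every later lambda invocation return immediately. A minimal sketch of that "early exit from a callback loop" pattern, using a hypothetical for_each_item helper rather than the layer's types:

  #include <cstdint>
  #include <vector>

  // Hypothetical for_each-style helper, standing in for ForEachSubresource.
  template <typename Func>
  static void for_each_item(const std::vector<uint64_t>& items, Func&& func) {
      for (uint64_t v : items) func(v);
  }

  // "break"-like early exit from a callback-driven loop: latch a flag and make
  // later invocations no-ops, as the rewritten code does with is_balanced.
  static bool contains_large_value(const std::vector<uint64_t>& items, uint64_t threshold) {
      bool found = false;
      for_each_item(items, [&](uint64_t v) {
          if (found) return;       // already decided; skip the remaining items
          if (v > threshold) found = true;
      });
      return found;
  }

The trade-off is that the remaining subresources are still visited as cheap no-ops, which is fine here because the subresource count is small.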
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/Vulkan-ValidationLayers-sdk-1.3.224.0/tests/vklayertests_nvidia_best_practices.cpp new/Vulkan-ValidationLayers-sdk-1.3.224.1/tests/vklayertests_nvidia_best_practices.cpp
--- old/Vulkan-ValidationLayers-sdk-1.3.224.0/tests/vklayertests_nvidia_best_practices.cpp   2022-08-19 18:26:32.000000000 +0200
+++ new/Vulkan-ValidationLayers-sdk-1.3.224.1/tests/vklayertests_nvidia_best_practices.cpp   2022-08-25 19:28:34.000000000 +0200
@@ -1061,6 +1061,58 @@
m_errorMonitor->VerifyFound();
}
+ discard_barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
+
+ {
+ SCOPED_TRACE("Transfer clear with VK_REMAINING_ARRAY_LAYERS");
+
+ vk::CmdBeginRendering(cmd, &begin_rendering_info);
+
+ vk::CmdSetDepthCompareOp(cmd, VK_COMPARE_OP_LESS);
+ for (int i = 0; i < 60; ++i) m_commandBuffer->Draw(0, 0, 0, 0);
+
+ vk::CmdEndRendering(cmd);
+
+ VkClearDepthStencilValue ds_value{};
+        vk::CmdClearDepthStencilImage(cmd, image.handle(), VK_IMAGE_LAYOUT_GENERAL, &ds_value, 1,
+ &discard_barrier.subresourceRange);
+
+ vk::CmdBeginRendering(cmd, &begin_rendering_info);
+
+ vk::CmdSetDepthCompareOp(cmd, VK_COMPARE_OP_GREATER);
+ for (int i = 0; i < 40; ++i) m_commandBuffer->Draw(0, 0, 0, 0);
+
+ set_desired_failure_msg();
+ vk::CmdEndRendering(cmd);
+ m_errorMonitor->Finish();
+ }
+
+ discard_barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
+
+ {
+ SCOPED_TRACE("Transfer clear with VK_REMAINING_MIP_LEVELS");
+
+ vk::CmdBeginRendering(cmd, &begin_rendering_info);
+
+ vk::CmdSetDepthCompareOp(cmd, VK_COMPARE_OP_LESS);
+ for (int i = 0; i < 60; ++i) m_commandBuffer->Draw(0, 0, 0, 0);
+
+ vk::CmdEndRendering(cmd);
+
+ VkClearDepthStencilValue ds_value{};
+        vk::CmdClearDepthStencilImage(cmd, image.handle(), VK_IMAGE_LAYOUT_GENERAL, &ds_value, 1,
+ &discard_barrier.subresourceRange);
+
+ vk::CmdBeginRendering(cmd, &begin_rendering_info);
+
+ vk::CmdSetDepthCompareOp(cmd, VK_COMPARE_OP_GREATER);
+ for (int i = 0; i < 40; ++i) m_commandBuffer->Draw(0, 0, 0, 0);
+
+ set_desired_failure_msg();
+ vk::CmdEndRendering(cmd);
+ m_errorMonitor->Finish();
+ }
+
m_commandBuffer->end();
}
@@ -1200,6 +1252,7 @@
allocate_info.commandBufferCount = 1;
vk_testing::CommandBuffer command_buffer0(*m_device, allocate_info);
vk_testing::CommandBuffer command_buffer1(*m_device, allocate_info);
+ vk_testing::CommandBuffer command_buffer2(*m_device, allocate_info);
VkSubmitInfo submit_info = LvlInitStruct<VkSubmitInfo>();
submit_info.commandBufferCount = 1;
@@ -1250,4 +1303,26 @@
command_buffer1.end();
}
}
+ {
+ begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+                                             "UNASSIGNED-BestPractices-vkBeginCommandBuffer-one-time-submit");
+
+ submit_info.pCommandBuffers = &command_buffer2.handle();
+
+ command_buffer2.begin(&begin_info);
+ command_buffer2.end();
+
+        err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
+ ASSERT_VK_SUCCESS(err);
+
+ m_device->wait();
+
+ err = vk::BeginCommandBuffer(command_buffer2.handle(), &begin_info);
+ m_errorMonitor->Finish();
+
+ if (err == VK_SUCCESS) {
+ command_buffer2.end();
+ }
+ }
}
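The last hunk adds a third command buffer to exercise the UNASSIGNED-BestPractices-vkBeginCommandBuffer-one-time-submit warning; it likely goes hand in hand with the "if (cb)" -> "if (!cb)" fix near the top of best_practices_utils.cpp, since the inverted check made PreCallRecordBeginCommandBuffer bail out for every valid command buffer and skip the num_submits/is_one_time_submit bookkeeping. For reference, a minimal sketch of the application-side pattern the warning targets, assuming valid device, pool and queue handles created elsewhere and a pool created with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT so the second begin is legal at the core-spec level:

  #include <vulkan/vulkan_core.h>

  // Sketch of the pattern the one-time-submit best-practice check flags:
  // a command buffer begun with ONE_TIME_SUBMIT_BIT is submitted, then begun
  // (re-recorded) again. device, pool and queue are assumed valid handles.
  static void ReuseOneTimeSubmitBuffer(VkDevice device, VkCommandPool pool, VkQueue queue) {
      VkCommandBufferAllocateInfo alloc_info{VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO};
      alloc_info.commandPool = pool;
      alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
      alloc_info.commandBufferCount = 1;

      VkCommandBuffer cmd = VK_NULL_HANDLE;
      vkAllocateCommandBuffers(device, &alloc_info, &cmd);

      VkCommandBufferBeginInfo begin_info{VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
      begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;

      vkBeginCommandBuffer(cmd, &begin_info);
      vkEndCommandBuffer(cmd);

      VkSubmitInfo submit{VK_STRUCTURE_TYPE_SUBMIT_INFO};
      submit.commandBufferCount = 1;
      submit.pCommandBuffers = &cmd;
      vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
      vkQueueWaitIdle(queue);

      // Beginning the same one-time-submit buffer a second time is the reuse
      // the best-practices layer warns about in the test above.
      vkBeginCommandBuffer(cmd, &begin_info);
      vkEndCommandBuffer(cmd);
  }

The new test drives the same sequence through the framework's vk_testing wrappers, with SetDesiredFailureMsg set up beforehand and m_errorMonitor->Finish() checked after the second begin.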