commit:     c39f74f688de150ce0c3cfa0f3b2aa9efe845179
Author:     Michael Mair-Keimberger <mmk <AT> levelnine <DOT> at>
AuthorDate: Sat Apr 23 16:08:11 2022 +0000
Commit:     Conrad Kostecki <conikost <AT> gentoo <DOT> org>
CommitDate: Mon Apr 25 21:46:19 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=c39f74f6

sys-libs/kpmcore: remove unused patch(es)

Package-Manager: Portage-3.0.30, Repoman-3.0.3
Signed-off-by: Michael Mair-Keimberger <mmk <AT> levelnine.at>
Closes: https://github.com/gentoo/gentoo/pull/25169
Signed-off-by: Conrad Kostecki <conikost <AT> gentoo.org>

 .../kpmcore-21.08.3-dont-destroy-zfs-pool.patch    | 46 ----------------------
 ....08.3-fix-seek-error-when-shred-partition.patch | 27 -------------
 2 files changed, 73 deletions(-)

diff --git a/sys-libs/kpmcore/files/kpmcore-21.08.3-dont-destroy-zfs-pool.patch b/sys-libs/kpmcore/files/kpmcore-21.08.3-dont-destroy-zfs-pool.patch
deleted file mode 100644
index 83cc15d0f2d5..000000000000
--- a/sys-libs/kpmcore/files/kpmcore-21.08.3-dont-destroy-zfs-pool.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 282cfdcde179ec44d053b257e13aa715158596bd Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Andrius=20=C5=A0tikonas?= <[email protected]>
-Date: Sat, 30 Oct 2021 21:13:07 +0100
-Subject: [PATCH] Do not destroy zfs pool when removing zfs partition.
-
-This can be dangerous, e.g. if partition is part of raid set.
-So better be more cautious and in some cases fail to remove partition
-than lose data.
----
- src/fs/zfs.cpp | 7 -------
- src/fs/zfs.h   | 1 -
- 2 files changed, 8 deletions(-)
-
-diff --git a/src/fs/zfs.cpp b/src/fs/zfs.cpp
-index 5d12894..5f4c87d 100644
---- a/src/fs/zfs.cpp
-+++ b/src/fs/zfs.cpp
-@@ -77,13 +77,6 @@ qint64 zfs::maxCapacity() const
-     return Capacity::unitFactor(Capacity::Unit::Byte, Capacity::Unit::EiB);
- }
- 
--bool zfs::remove(Report& report, const QString& deviceNode) const
--{
--    Q_UNUSED(deviceNode)
--    ExternalCommand cmd(report, QStringLiteral("zpool"), { QStringLiteral("destroy"), QStringLiteral("-f"), label() });
--    return cmd.run(-1) && cmd.exitCode() == 0;
--}
--
- bool zfs::writeLabel(Report& report, const QString& deviceNode, const QString& newLabel)
- {
-     Q_UNUSED(deviceNode)
-diff --git a/src/fs/zfs.h b/src/fs/zfs.h
-index 6e559de..61026c3 100644
---- a/src/fs/zfs.h
-+++ b/src/fs/zfs.h
-@@ -35,7 +35,6 @@ public:
- public:
-     void init() override;
- 
--    bool remove(Report& report, const QString& deviceNode) const override;
--    bool writeLabel(Report& report, const QString& deviceNode, const QString& newLabel) override;
- 
-     CommandSupportType supportGetUsed() const override {
--- 
-GitLab
-

diff --git a/sys-libs/kpmcore/files/kpmcore-21.08.3-fix-seek-error-when-shred-partition.patch b/sys-libs/kpmcore/files/kpmcore-21.08.3-fix-seek-error-when-shred-partition.patch
deleted file mode 100644
index 568db790c7e9..000000000000
--- a/sys-libs/kpmcore/files/kpmcore-21.08.3-fix-seek-error-when-shred-partition.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From e9fc875c5e233401afd12f54ab0472c66ff6fdff Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Andrius=20=C5=A0tikonas?= <[email protected]>
-Date: Fri, 12 Nov 2021 01:02:15 +0000
-Subject: [PATCH] Fix seek error when filling device with random data or
- zeroes.
-
----
- src/util/externalcommandhelper.cpp | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/src/util/externalcommandhelper.cpp b/src/util/externalcommandhelper.cpp
-index 6e61aba..0fa400d 100644
---- a/src/util/externalcommandhelper.cpp
-+++ b/src/util/externalcommandhelper.cpp
-@@ -80,7 +80,8 @@ bool ExternalCommandHelper::readData(const QString& sourceDevice, QByteArray& bu
-         return false;
-     }
- 
--    if (!device.seek(offset)) {
-+    // Sequential devices such as /dev/zero or /dev/urandom return false on seek().
-+    if (!device.isSequential() && !device.seek(offset)) {
-        qCritical() << xi18n("Could not seek position %1 on device <filename>%2</filename>.", offset, sourceDevice);
-         return false;
-     }
--- 
-GitLab
-
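For context on the second removed patch (unused here presumably because the fix landed upstream): it guards the seek with QIODevice::isSequential(), since sequential character devices such as /dev/zero or /dev/urandom cannot seek. A minimal sketch of that guard follows, assuming Qt's QFile/QIODevice API; readBlock() is a hypothetical stand-in for kpmcore's ExternalCommandHelper::readData(), not the actual helper.

#include <QByteArray>
#include <QFile>
#include <QIODevice>
#include <QString>

// Hypothetical stand-in for ExternalCommandHelper::readData(); not the
// real kpmcore code. Reads 'size' bytes starting at 'offset' from
// 'sourceDevice' into 'buffer'.
static bool readBlock(const QString& sourceDevice, QByteArray& buffer,
                      qint64 offset, qint64 size)
{
    QFile device(sourceDevice);
    if (!device.open(QIODevice::ReadOnly))
        return false;

    // QIODevice::seek() fails on sequential devices (e.g. /dev/zero,
    // /dev/urandom), so only attempt it on random-access devices --
    // the same guard the removed patch added upstream.
    if (!device.isSequential() && !device.seek(offset))
        return false;

    buffer = device.read(size);
    return !buffer.isEmpty();
}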
