Re: patch: if_iwx.c add support for ax201 with subsystem id 0x0030

2022-01-09 Thread Stefan Sperling
On Sun, Jan 09, 2022 at 05:32:21PM +, Iraklis Karagkiozoglou wrote:
> Hi,
> 
> I've added support for AX201 with subsystem id 0x0030 in if_iwx.
> 
> I am only loading a different firmware for the specific subsystem id to
> avoid introducing any regressions or bugs.

We are receiving more and more reports like yours that our iwm(4) and
iwx(4) driver do not match some Intel wifi device which could be supported.

So far, the devices recognized by our drivers are essentially the exact
devices which were used by developers to make the drivers work.
In the wild there are many devices which Intel has released which we do
not recognize because the code we have to match devices is too simplistic.

The Linux driver does not match your device based on subsystem ID 0x0030.
Instead, it matches on the PCI product ID (0x20f0), reads the CSR_HW_RF_ID
register, and maps your device to QuZ-a0-jf-b0 based on the value of this
register! If you look into Linux you will see that it does not list subsystem
ID 0x0030 anywhere; your device's subsystem ID is matched by the "IWL_CFG_ANY"
wildcard.

In order to use the correct firmware for each device Intel has shipped,
the device matching code in our driver needs to be reworked to follow
the same rules as Linux. Otherwise we will never get this right.
Their device matching code is very complicated (see iwlwifi/pcie/drv.c).
We will need to port this code over to have a proper long-term solution :-/



relayd: conversion for opaque RSA_METHOD

2022-01-09 Thread Theo Buehler
Since RSA_METHOD will become opaque, we can no longer initialize a
static version of it on the stack. Instead, allocate it once and
use accessors to initialize it.

I removed parentheses in all return statements to avoid a lot of
linewrapping. Other than that, this is a mostly mechanical diff.

Index: ca.c
===
RCS file: /cvs/src/usr.sbin/relayd/ca.c,v
retrieving revision 1.37
diff -u -p -r1.37 ca.c
--- ca.c8 Dec 2021 03:40:44 -   1.37
+++ ca.c9 Jan 2022 18:40:12 -
@@ -220,10 +220,10 @@ ca_dispatch_parent(int fd, struct privse
config_getreset(env, imsg);
break;
default:
-   return (-1);
+   return -1;
}
 
-   return (0);
+   return 0;
 }
 
 int
@@ -290,10 +290,10 @@ ca_dispatch_relay(int fd, struct privsep
RSA_free(rsa);
break;
default:
-   return (-1);
+   return -1;
}
 
-   return (0);
+   return 0;
 }
 
 /*
@@ -301,23 +301,7 @@ ca_dispatch_relay(int fd, struct privsep
  */
 
 const RSA_METHOD *rsa_default = NULL;
-
-static RSA_METHOD rsae_method = {
-   "RSA privsep engine",
-   rsae_pub_enc,
-   rsae_pub_dec,
-   rsae_priv_enc,
-   rsae_priv_dec,
-   rsae_mod_exp,
-   rsae_bn_mod_exp,
-   rsae_init,
-   rsae_finish,
-   0,
-   NULL,
-   rsae_sign,
-   rsae_verify,
-   rsae_keygen
-};
+static RSA_METHOD *rsae_method;
 
 static int
 rsae_send_imsg(int flen, const u_char *from, u_char *to, RSA *rsa,
@@ -337,7 +321,7 @@ rsae_send_imsg(int flen, const u_char *f
static u_int seq = 0;
 
if ((hash = RSA_get_ex_data(rsa, 0)) == NULL)
-   return (0);
+   return 0;
 
iev = proc_iev(ps, PROC_CA, ps->ps_instance);
	ibuf = &iev->ibuf;
@@ -378,7 +362,7 @@ rsae_send_imsg(int flen, const u_char *f
__func__,
cmd == IMSG_CA_PRIVENC ? "enc" : "dec",
cko.cko_cookie);
-   return (-1);
+   return -1;
default:
break;
}
@@ -426,44 +410,42 @@ rsae_send_imsg(int flen, const u_char *f
}
imsg_event_add(iev);
 
-   return (ret);
+   return ret;
 }
 
 int
 rsae_pub_enc(int flen,const u_char *from, u_char *to, RSA *rsa,int padding)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   return (rsa_default->rsa_pub_enc(flen, from, to, rsa, padding));
+   return RSA_meth_get_pub_enc(rsa_default)(flen, from, to, rsa, padding);
 }
 
 int
 rsae_pub_dec(int flen,const u_char *from, u_char *to, RSA *rsa,int padding)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   return (rsa_default->rsa_pub_dec(flen, from, to, rsa, padding));
+   return RSA_meth_get_pub_dec(rsa_default)(flen, from, to, rsa, padding);
 }
 
 int
 rsae_priv_enc(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   return (rsae_send_imsg(flen, from, to, rsa, padding,
-   IMSG_CA_PRIVENC));
+   return rsae_send_imsg(flen, from, to, rsa, padding, IMSG_CA_PRIVENC);
 }
 
 int
 rsae_priv_dec(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   return (rsae_send_imsg(flen, from, to, rsa, padding,
-   IMSG_CA_PRIVDEC));
+   return rsae_send_imsg(flen, from, to, rsa, padding, IMSG_CA_PRIVDEC);
 }
 
 int
 rsae_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   return (rsa_default->rsa_mod_exp(r0, I, rsa, ctx));
+   return RSA_meth_get_mod_exp(rsa_default)(r0, I, rsa, ctx);
 }
 
 int
@@ -471,25 +453,25 @@ rsae_bn_mod_exp(BIGNUM *r, const BIGNUM 
 const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   return (rsa_default->bn_mod_exp(r, a, p, m, ctx, m_ctx));
+   return RSA_meth_get_bn_mod_exp(rsa_default)(r, a, p, m, ctx, m_ctx);
 }
 
 int
 rsae_init(RSA *rsa)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   if (rsa_default->init == NULL)
-   return (1);
-   return (rsa_default->init(rsa));
+   if (RSA_meth_get_init(rsa_default) == NULL)
+   return 1;
+   return RSA_meth_get_init(rsa_default)(rsa);
 }
 
 int
 rsae_finish(RSA *rsa)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   if (rsa_default->finish == NULL)
-   return (1);
-   return (rsa_default->finish(rsa));
+   if (RSA_meth_get_finish(rsa_default) == NULL)
+   return 1;
+   return RSA_meth_get_finish(rsa_default)(rsa);
 }
 
 int
@@ -497,8 +479,8 @@ rsae_sign(int type, const u_char *m, u_i
 u_int *siglen, const RSA *rsa)
 {
DPRINTF("%s:%d", __func__, __LINE__);
-   

patch: if_iwx.c add support for ax201 with subsystem id 0x0030

2022-01-09 Thread Iraklis Karagkiozoglou
Hi,

I've added support for AX201 with subsystem id 0x0030 in if_iwx.

I am only loading a different firmware for the specific subsystem id to
avoid introducing any regressions or bugs.

--
Iraklis Karagkiozoglou

diff --git sys/dev/pci/if_iwx.c sys/dev/pci/if_iwx.c
index 4c85ad108a8..78b73303f42 100644
--- sys/dev/pci/if_iwx.c
+++ sys/dev/pci/if_iwx.c
@@ -9180,6 +9180,7 @@ static const struct pci_matchid iwx_devices[] = {
 };
 
 static const struct pci_matchid iwx_subsystem_id_ax201[] = {
+   { PCI_VENDOR_INTEL, 0x0030 },
{ PCI_VENDOR_INTEL, 0x0070 },
{ PCI_VENDOR_INTEL, 0x0074 },
{ PCI_VENDOR_INTEL, 0x0078 },
@@ -9311,6 +9312,8 @@ iwx_attach(struct device *parent, struct device *self, 
void *aux)
pcireg_t reg, memtype;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
+   pcireg_t subid;
+   pci_product_id_t spid;
const char *intrstr;
int err;
int txq_i, i, j;
@@ -9394,6 +9397,9 @@ iwx_attach(struct device *parent, struct device *self, 
void *aux)
sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
 
+   subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
+   spid = PCI_PRODUCT(subid);
+
switch (PCI_PRODUCT(pa->pa_id)) {
case PCI_PRODUCT_INTEL_WL_22500_1:
sc->sc_fwname = "iwx-cc-a0-67";
@@ -9406,6 +9412,26 @@ iwx_attach(struct device *parent, struct device *self, 
void *aux)
sc->sc_uhb_supported = 0;
break;
case PCI_PRODUCT_INTEL_WL_22500_2:
+   if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
+   printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
+   return;
+   }
+
+   if (spid == 0x0030) {
+   sc->sc_fwname = "iwx-QuZ-a0-jf-b0-63";
+   }
+   else {
+   sc->sc_fwname = "iwx-QuZ-a0-hr-b0-67";
+   }
+
+   sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+   sc->sc_integrated = 1;
+   sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
+   sc->sc_low_latency_xtal = 0;
+   sc->sc_xtal_latency = 500;
+   sc->sc_tx_with_siso_diversity = 0;
+   sc->sc_uhb_supported = 0;
+   break;
case PCI_PRODUCT_INTEL_WL_22500_3:
case PCI_PRODUCT_INTEL_WL_22500_5:
if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {



patch: move kern_unveil.c to use DPRINTF()

2022-01-09 Thread Sebastien Marie
Hi,

The following diff changes (but not too much) the way printf debug is
done in kern_unveil.c

Currently, each printf() is enclosed in #ifdef DEBUG_UNVEIL. The diff
moves to using DPRINTF(). It reduces the number of #ifdef inside the
file.

I also changed some strings to use __func__ instead of using the
function name verbatim.

Build tested with and without DEBUG_UNVEIL defined.

No intended changes.

Comments or OK ?
-- 
Sebastien Marie

diff de3a27964b222c4be979b46c1662d48f67059711 /home/semarie/repos/openbsd/src
blob - 50a043e63d5ae40167428b894994ae10f5b705c1
file + sys/kern/kern_unveil.c
--- sys/kern/kern_unveil.c
+++ sys/kern/kern_unveil.c
@@ -56,6 +56,11 @@ struct unveil {
 };
 
 /* #define DEBUG_UNVEIL */
+#ifdef DEBUG_UNVEIL
+#defineDPRINTF(x...)   do { printf(x); } while (0)
+#else
+#defineDPRINTF(x...)
+#endif
 
 #define UNVEIL_MAX_VNODES  128
 #define UNVEIL_MAX_NAMES   128
@@ -103,9 +108,8 @@ unveil_delete_names(struct unveil *uv)
ret++;
}
	rw_exit_write(&uv->uv_lock);
-#ifdef DEBUG_UNVEIL
-   printf("deleted %d names\n", ret);
-#endif
+
+   DPRINTF("deleted %d names\n", ret);
return ret;
 }
 
@@ -120,9 +124,8 @@ unveil_add_name_unlocked(struct unveil *uv, char *name
unvname_delete(unvn);
return 0;
}
-#ifdef DEBUG_UNVEIL
-   printf("added name %s underneath vnode %p\n", name, uv->uv_vp);
-#endif
+
+   DPRINTF("added name %s underneath vnode %p\n", name, uv->uv_vp);
return 1;
 }
 
@@ -144,10 +147,8 @@ unveil_namelookup(struct unveil *uv, char *name)
 
	rw_enter_read(&uv->uv_lock);
 
-#ifdef DEBUG_UNVEIL
-   printf("unveil_namelookup: looking up name %s (%p) in vnode %p\n",
-   name, name, uv->uv_vp);
-#endif
+   DPRINTF("%s: looking up name %s (%p) in vnode %p\n",
+   __func__, name, name, uv->uv_vp);
 
KASSERT(uv->uv_vp != NULL);
 
@@ -158,14 +159,9 @@ unveil_namelookup(struct unveil *uv, char *name)
 
	rw_exit_read(&uv->uv_lock);
 
-#ifdef DEBUG_UNVEIL
-   if (ret == NULL)
-   printf("unveil_namelookup: no match for name %s in vnode %p\n",
-   name, uv->uv_vp);
-   else
-   printf("unveil_namelookup: matched name %s in vnode %p\n",
-   name, uv->uv_vp);
-#endif
+   DPRINTF("%s: %s name %s in vnode %p\n", __func__,
+   (ret == NULL) ? "no match for" : "matched",
+   name, uv->uv_vp);
return ret;
 }
 
@@ -181,11 +177,10 @@ unveil_destroy(struct process *ps)
/* skip any vnodes zapped by unveil_removevnode */
if (vp != NULL) {
vp->v_uvcount--;
-#ifdef DEBUG_UNVEIL
-   printf("unveil: %s(%d): removing vnode %p uvcount %d "
+
+   DPRINTF("unveil: %s(%d): removing vnode %p uvcount %d "
"in position %ld\n",
ps->ps_comm, ps->ps_pid, vp, vp->v_uvcount, i);
-#endif
vrele(vp);
}
ps->ps_uvncount -= unveil_delete_names(uv);
@@ -291,7 +286,7 @@ unveil_find_cover(struct vnode *dp, struct proc *p)
 * This corner case should not happen because
 * we have not set LOCKPARENT in the flags
 */
-   printf("vnode %p PDIRUNLOCK on error\n", vp);
+   DPRINTF("vnode %p PDIRUNLOCK on error\n", vp);
vrele(vp);
}
break;
@@ -372,9 +367,7 @@ unveil_setflags(u_char *flags, u_char nflags)
 {
 #if 0
if (((~(*flags)) & nflags) != 0) {
-#ifdef DEBUG_UNVEIL
-   printf("Flags escalation %llX -> %llX\n", *flags, nflags);
-#endif
+   DPRINTF("Flags escalation %llX -> %llX\n", *flags, nflags);
return 1;
}
 #endif
@@ -465,11 +458,10 @@ unveil_add(struct proc *p, struct nameidata *ndp, cons
 * unrestrict it.
 */
if (directory_add) {
-#ifdef DEBUG_UNVEIL
-   printf("unveil: %s(%d): updating directory vnode %p"
+   DPRINTF("unveil: %s(%d): updating directory vnode %p"
" to unrestricted uvcount %d\n",
pr->ps_comm, pr->ps_pid, vp, vp->v_uvcount);
-#endif
+
			if (!unveil_setflags(&uv->uv_flags, flags))
ret = EPERM;
else
@@ -485,12 +477,11 @@ unveil_add(struct proc *p, struct nameidata *ndp, cons
struct unvname *tname;
if ((tname = unveil_namelookup(uv,
ndp->ni_cnd.cn_nameptr)) != NULL) {
-#ifdef DEBUG_UNVEIL
-   printf("unveil: %s(%d): changing flags for %s"
+

Re: unwind/unbound: fix build with opaque DSA

2022-01-09 Thread Stuart Henderson
On 2022/01/09 14:18, Theo Buehler wrote:
> This switches to using the OpenSSL 1.1 codepath that does DSA_set0_pqg().
> Hopefully it's not too much hassle for updates. DSA_set0_pqg() has been
> in LibreSSL for a while, so we can probably upstream this easily.
> 
> libunbound in ports will need the same patch.

OK.

> Index: sbin/unwind/libunbound/sldns/keyraw.c
> ===
> RCS file: /cvs/src/sbin/unwind/libunbound/sldns/keyraw.c,v
> retrieving revision 1.2
> diff -u -p -r1.2 keyraw.c
> --- sbin/unwind/libunbound/sldns/keyraw.c 14 Aug 2021 07:32:46 -  
> 1.2
> +++ sbin/unwind/libunbound/sldns/keyraw.c 9 Jan 2022 12:24:28 -
> @@ -250,7 +250,7 @@ sldns_key_buf2dsa_raw(unsigned char* key
>   if(!(dsa = DSA_new())) {
>   return NULL;
>   }
> -#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
> +#if OPENSSL_VERSION_NUMBER < 0x1010
>  #ifndef S_SPLINT_S
>   dsa->p = P;
>   dsa->q = Q;
> @@ -428,7 +428,7 @@ sldns_key_buf2rsa_raw(unsigned char* key
>   BN_free(modulus);
>   return NULL;
>   }
> -#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
> +#if OPENSSL_VERSION_NUMBER < 0x1010
>  #ifndef S_SPLINT_S
>   rsa->n = modulus;
>   rsa->e = exponent;
> Index: usr.sbin/unbound/sldns/keyraw.c
> ===
> RCS file: /cvs/src/usr.sbin/unbound/sldns/keyraw.c,v
> retrieving revision 1.6
> diff -u -p -r1.6 keyraw.c
> --- usr.sbin/unbound/sldns/keyraw.c   13 Aug 2021 19:58:46 -  1.6
> +++ usr.sbin/unbound/sldns/keyraw.c   9 Jan 2022 12:24:54 -
> @@ -250,7 +250,7 @@ sldns_key_buf2dsa_raw(unsigned char* key
>   if(!(dsa = DSA_new())) {
>   return NULL;
>   }
> -#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
> +#if OPENSSL_VERSION_NUMBER < 0x1010
>  #ifndef S_SPLINT_S
>   dsa->p = P;
>   dsa->q = Q;
> @@ -428,7 +428,7 @@ sldns_key_buf2rsa_raw(unsigned char* key
>   BN_free(modulus);
>   return NULL;
>   }
> -#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
> +#if OPENSSL_VERSION_NUMBER < 0x1010
>  #ifndef S_SPLINT_S
>   rsa->n = modulus;
>   rsa->e = exponent;
> 



request for testing: malloc and large allocations

2022-01-09 Thread Otto Moerbeek
Hi,

currently malloc does cache a number of free'ed regions up to 128k in
size. This cache is indexed by size (in # of pages), so it is very
quick to check.

Some programs allocate and deallocate larger allocations in a frantic
way.  Accommodate those programs by also keeping a cache of regions
between 128k and 2M, in a cache of variable sized regions.

My test case speeds up about twice. A make build gets a small speedup.

This has been tested by myself on amd64 quite intensively. I am asking
for more tests, especially on more "exotic" platforms. I will do arm64
myself soon.  Testing can consist of running your favorite programs, doing
make builds, or running the tests in regress/lib/libc/malloc.

Thanks in advance!

-Otto

Index: stdlib/malloc.c
===
RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.272
diff -u -p -r1.272 malloc.c
--- stdlib/malloc.c 19 Sep 2021 09:15:22 -  1.272
+++ stdlib/malloc.c 9 Jan 2022 13:10:35 -
@@ -113,13 +113,28 @@ struct region_info {
 
 LIST_HEAD(chunk_head, chunk_info);
 
-#define MAX_CACHEABLE_SIZE 32
-struct cache {
-   void *pages[MALLOC_MAXCACHE];
+/*
+ * Two caches, one for "small" regions, one for "big".
+ * Small cache is an array per size, big cache is one array with different
+ * sized regions
+ */
+#define MAX_SMALLCACHEABLE_SIZE32
+#define MAX_BIGCACHEABLE_SIZE  512
+#define BIGCACHE_SIZE  MALLOC_MAXCACHE
+/* If the total # of pages is larger than this, evict before inserting */
+#define BIGCACHE_FILL(sz)  (MAX_BIGCACHEABLE_SIZE * (sz) / 4)
+
+struct smallcache {
+   void **pages;
ushort length;
ushort max;
 };
 
+struct bigcache {
+   void *page;
+   size_t psize;
+};
+
 struct dir_info {
u_int32_t canary1;
int active; /* status of malloc */
@@ -139,7 +154,10 @@ struct dir_info {
void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
u_char rbytes[32];  /* random bytes */
/* free pages cache */
-   struct cache cache[MAX_CACHEABLE_SIZE];
+   struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
+   ushort bigcache_size;
+   size_t bigcache_used;
+   struct bigcache bigcache[BIGCACHE_SIZE];
 #ifdef MALLOC_STATS
size_t inserts;
size_t insert_collisions;
@@ -714,18 +732,61 @@ unmap(struct dir_info *d, void *p, size_
size_t psz = sz >> MALLOC_PAGESHIFT;
void *r;
u_short i;
-   struct cache *cache;
+   struct smallcache *cache;
 
if (sz != PAGEROUND(sz) || psz == 0)
wrterror(d, "munmap round");
 
-   if (psz > MAX_CACHEABLE_SIZE || d->cache[psz - 1].max == 0) {
+   if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
+   psz <= MAX_BIGCACHEABLE_SIZE) {
+   u_short base = getrbyte(d);
+   u_short j;
+
+   /* don't look through all slots */
+   for (j = 0; j < d->bigcache_size / 4; j++) {
+   i = (base + j) % d->bigcache_size;
+   if (d->bigcache_used <
+   BIGCACHE_FILL(d->bigcache_size))  {
+   if (d->bigcache[i].psize == 0)
+   break;
+   } else {
+   if (d->bigcache[i].psize != 0)
+   break;
+   }
+   }
+   /* if we didn't find a preferred slot, use random one */
+   if (d->bigcache[i].psize != 0) {
+   size_t tmp;
+
+   r = d->bigcache[i].page;
+   d->bigcache_used -= d->bigcache[i].psize;
+   tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
+   if (!mopts.malloc_freeunmap)
+   validate_junk(d, r, tmp);
+   if (munmap(r, tmp))
+wrterror(d, "munmap %p", r);
+   STATS_SUB(d->malloc_used, tmp);
+   }
+   
+   if (clear > 0)
+   explicit_bzero(p, clear);
+   if (mopts.malloc_freeunmap) {
+   if (mprotect(p, sz, PROT_NONE))
+   wrterror(d, "mprotect %p", r);
+   } else
+   junk_free(d->malloc_junk, p, sz);
+   d->bigcache[i].page = p;
+   d->bigcache[i].psize = psz;
+   d->bigcache_used += psz;
+   return;
+   }
+   if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
if (munmap(p, sz))
wrterror(d, "munmap %p", p);
STATS_SUB(d->malloc_used, sz);
return;
}
-   cache = >cache[psz - 1];
-   cache = &d->smallcache[psz - 1];

unwind/unbound: fix build with opaque DSA

2022-01-09 Thread Theo Buehler
This switches to using the OpenSSL 1.1 codepath that does DSA_set0_pqg().
Hopefully it's not too much hassle for updates. DSA_set0_pqg() has been
in LibreSSL for a while, so we can probably upstream this easily.

libunbound in ports will need the same patch.

Index: sbin/unwind/libunbound/sldns/keyraw.c
===
RCS file: /cvs/src/sbin/unwind/libunbound/sldns/keyraw.c,v
retrieving revision 1.2
diff -u -p -r1.2 keyraw.c
--- sbin/unwind/libunbound/sldns/keyraw.c   14 Aug 2021 07:32:46 -  
1.2
+++ sbin/unwind/libunbound/sldns/keyraw.c   9 Jan 2022 12:24:28 -
@@ -250,7 +250,7 @@ sldns_key_buf2dsa_raw(unsigned char* key
if(!(dsa = DSA_new())) {
return NULL;
}
-#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
+#if OPENSSL_VERSION_NUMBER < 0x1010
 #ifndef S_SPLINT_S
dsa->p = P;
dsa->q = Q;
@@ -428,7 +428,7 @@ sldns_key_buf2rsa_raw(unsigned char* key
BN_free(modulus);
return NULL;
}
-#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
+#if OPENSSL_VERSION_NUMBER < 0x1010
 #ifndef S_SPLINT_S
rsa->n = modulus;
rsa->e = exponent;
Index: usr.sbin/unbound/sldns/keyraw.c
===
RCS file: /cvs/src/usr.sbin/unbound/sldns/keyraw.c,v
retrieving revision 1.6
diff -u -p -r1.6 keyraw.c
--- usr.sbin/unbound/sldns/keyraw.c 13 Aug 2021 19:58:46 -  1.6
+++ usr.sbin/unbound/sldns/keyraw.c 9 Jan 2022 12:24:54 -
@@ -250,7 +250,7 @@ sldns_key_buf2dsa_raw(unsigned char* key
if(!(dsa = DSA_new())) {
return NULL;
}
-#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
+#if OPENSSL_VERSION_NUMBER < 0x1010
 #ifndef S_SPLINT_S
dsa->p = P;
dsa->q = Q;
@@ -428,7 +428,7 @@ sldns_key_buf2rsa_raw(unsigned char* key
BN_free(modulus);
return NULL;
}
-#if OPENSSL_VERSION_NUMBER < 0x1010 || defined(HAVE_LIBRESSL)
+#if OPENSSL_VERSION_NUMBER < 0x1010
 #ifndef S_SPLINT_S
rsa->n = modulus;
rsa->e = exponent;