On Thu, Apr 19, 2018 at 04:47:36PM -0400, mabi wrote:
> The VPN works fine for low data traffic but as soon as I start a big transfer
> between the two sites the kernel panics when iked wants to rekey the SA. I
> can reproduce this on demand by using the iperf tool for example.
Please test the following patch. It adds a reference count to aesni
sessions so that a session cannot be freed (e.g. by a rekey tearing down
the SA) while aesni_process() is still using it on another CPU.
Index: arch/amd64/amd64/aesni.c
===================================================================
RCS file: src/sys/arch/amd64/amd64/aesni.c,v
retrieving revision 1.44
diff -u -p -r1.44 aesni.c
--- arch/amd64/amd64/aesni.c 9 Apr 2018 04:34:56 -0000 1.44
+++ arch/amd64/amd64/aesni.c 20 Apr 2018 01:43:02 -0000
@@ -21,6 +21,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/atomic.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/pool.h>
@@ -50,6 +51,7 @@ struct aesni_session {
uint32_t ses_dkey[4 * (AES_MAXROUNDS + 1)];
uint32_t ses_klen;
uint8_t ses_nonce[AESCTR_NONCESIZE];
+ unsigned int ses_refs;
int ses_sid;
GHASH_CTX *ses_ghash;
struct aesni_xts_ctx *ses_xts;
@@ -105,6 +107,10 @@ int aesni_newsession(u_int32_t *, struct
int aesni_freesession(u_int64_t);
int aesni_process(struct cryptop *);
+struct aesni_session *
+ aesni_get(uint32_t);
+void aesni_put(struct aesni_session *);
+
int aesni_swauth(struct cryptop *, struct cryptodesc *, struct swcr_data *,
caddr_t);
@@ -181,15 +187,11 @@ aesni_newsession(u_int32_t *sidp, struct
if (!ses)
return (ENOMEM);
+ ses->ses_refs = 1;
ses->ses_buf = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
if (ses->ses_buf != NULL)
ses->ses_buflen = PAGE_SIZE;
- mtx_enter(&aesni_sc->sc_mtx);
- LIST_INSERT_HEAD(&aesni_sc->sc_sessions, ses, ses_entries);
- mtx_leave(&aesni_sc->sc_mtx);
- ses->ses_sid = ++aesni_sc->sc_sid;
-
for (c = cri; c != NULL; c = c->cri_next) {
switch (c->cri_alg) {
case CRYPTO_AES_CBC:
@@ -214,7 +216,7 @@ aesni_newsession(u_int32_t *sidp, struct
ses->ses_xts = malloc(sizeof(struct aesni_xts_ctx),
M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
if (ses->ses_xts == NULL) {
- aesni_freesession(ses->ses_sid);
+ aesni_put(ses);
return (ENOMEM);
}
@@ -238,7 +240,7 @@ aesni_newsession(u_int32_t *sidp, struct
ses->ses_ghash = malloc(sizeof(GHASH_CTX),
M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
if (ses->ses_ghash == NULL) {
- aesni_freesession(ses->ses_sid);
+ aesni_put(ses);
return (ENOMEM);
}
@@ -267,7 +269,7 @@ aesni_newsession(u_int32_t *sidp, struct
swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
M_NOWAIT|M_ZERO);
if (swd == NULL) {
- aesni_freesession(ses->ses_sid);
+ aesni_put(ses);
return (ENOMEM);
}
ses->ses_swd = swd;
@@ -275,14 +277,14 @@ aesni_newsession(u_int32_t *sidp, struct
swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
M_NOWAIT);
if (swd->sw_ictx == NULL) {
- aesni_freesession(ses->ses_sid);
+ aesni_put(ses);
return (ENOMEM);
}
swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
M_NOWAIT);
if (swd->sw_octx == NULL) {
- aesni_freesession(ses->ses_sid);
+ aesni_put(ses);
return (ENOMEM);
}
@@ -316,11 +318,16 @@ aesni_newsession(u_int32_t *sidp, struct
break;
default:
- aesni_freesession(ses->ses_sid);
+ aesni_put(ses);
return (EINVAL);
}
}
+ mtx_enter(&aesni_sc->sc_mtx);
+ LIST_INSERT_HEAD(&aesni_sc->sc_sessions, ses, ses_entries);
+ ses->ses_sid = ++aesni_sc->sc_sid;
+ mtx_leave(&aesni_sc->sc_mtx);
+
*sidp = ses->ses_sid;
return (0);
}
@@ -329,23 +336,33 @@ int
aesni_freesession(u_int64_t tid)
{
struct aesni_session *ses;
- struct swcr_data *swd;
- struct auth_hash *axf;
u_int32_t sid = (u_int32_t)tid;
mtx_enter(&aesni_sc->sc_mtx);
LIST_FOREACH(ses, &aesni_sc->sc_sessions, ses_entries) {
- if (ses->ses_sid == sid)
+ if (ses->ses_sid == sid) {
+ LIST_REMOVE(ses, ses_entries);
break;
+ }
}
mtx_leave(&aesni_sc->sc_mtx);
if (ses == NULL)
return (EINVAL);
- mtx_enter(&aesni_sc->sc_mtx);
- LIST_REMOVE(ses, ses_entries);
- mtx_leave(&aesni_sc->sc_mtx);
+ aesni_put(ses);
+
+ return (0);
+}
+
+void
+aesni_put(struct aesni_session *ses)
+{
+ struct swcr_data *swd;
+ struct auth_hash *axf;
+
+ if (atomic_dec_int_nv(&ses->ses_refs) > 0)
+ return;
if (ses->ses_ghash) {
explicit_bzero(ses->ses_ghash, sizeof(GHASH_CTX));
@@ -379,8 +396,22 @@ aesni_freesession(u_int64_t tid)
explicit_bzero(ses, sizeof (*ses));
pool_put(&aesnipl, ses);
+}
- return (0);
+struct aesni_session *
+aesni_get(uint32_t sid)
+{
+ struct aesni_session *ses = NULL;
+
+ mtx_enter(&aesni_sc->sc_mtx);
+ LIST_FOREACH(ses, &aesni_sc->sc_sessions, ses_entries) {
+ if (ses->ses_sid == sid) {
+ atomic_inc_int(&ses->ses_refs);
+ break;
+ }
+ }
+ mtx_leave(&aesni_sc->sc_mtx);
+ return (ses);
}
int
@@ -609,13 +640,7 @@ aesni_process(struct cryptop *crp)
if (crp->crp_ndesc < 1)
return (EINVAL);
- mtx_enter(&aesni_sc->sc_mtx);
- LIST_FOREACH(ses, &aesni_sc->sc_sessions, ses_entries) {
- if (ses->ses_sid == (crp->crp_sid & 0xffffffff))
- break;
- }
- mtx_leave(&aesni_sc->sc_mtx);
-
+ ses = aesni_get(crp->crp_sid & 0xffffffff);
if (!ses) {
err = EINVAL;
goto out;
@@ -670,6 +695,8 @@ aesni_process(struct cryptop *crp)
}
out:
+ if (ses != NULL)
+ aesni_put(ses);
crp->crp_etype = err;
crypto_done(crp);
return (err);