Is it safe to call free(9) while holding a rwlock?  What kind of
deadlock or other weird situation could occur?

I'm asking because the diff below, which introduces a lock to protect
the uid globals, has an XXX comment from guenther@ asking whether that
is a problem.

There are currently multiple free(9) calls happening while the
NET_LOCK() is held, so I assume it is ok to remove the comment below.
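
To make the question concrete, this is the pattern the XXX comment is
about, boiled down to a minimal sketch (the lock name and function are
made up for illustration; the real code is in uid_find() below):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

struct rwlock examplelk = RWLOCK_INITIALIZER("examplelk");

/*
 * Sketch: a preallocated buffer turns out to be unneeded and is
 * released with free(9) while the write lock is still held.
 */
void
free_while_locked(void *buf, size_t len)
{
	rw_enter_write(&examplelk);
	/* ... lookup under the lock, `buf' lost the race ... */
	free(buf, M_TEMP, len);		/* free(9) with `examplelk' held */
	rw_exit_write(&examplelk);
}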
Any other comments on the diff below?  It currently doesn't buy much,
since all the callers run under the KERNEL_LOCK(), but it is a small
step towards guenther@'s proctree lock.  Ok?
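
For reference, the calling convention the diff introduces looks like
this from a consumer's point of view (sketch only; chgsomecnt() and
`ui_somecnt' are invented names, the real users are chgproccnt() and
lf_alloc()/lf_free()):

/*
 * uid_find() returns with `uidinfolk' write-locked so the counter
 * update below is serialized; uid_release() drops the lock.
 */
long
chgsomecnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	long count;

	uip = uid_find(uid);		/* lock acquired here */
	count = (uip->ui_somecnt += diff);
	uid_release(uip);		/* lock dropped here */

	return (count);
}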
Index: kern/kern_proc.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_proc.c,v
retrieving revision 1.81
diff -u -p -r1.81 kern_proc.c
--- kern/kern_proc.c 20 Feb 2018 12:38:58 -0000 1.81
+++ kern/kern_proc.c 21 Feb 2018 09:18:25 -0000
@@ -39,6 +39,7 @@
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/wait.h>
+#include <sys/rwlock.h>
#include <ufs/ufs/quota.h>
#include <sys/uio.h>
#include <sys/malloc.h>
@@ -49,6 +50,7 @@
#include <sys/pool.h>
#include <sys/vnode.h>
+struct rwlock uidinfolk;
#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash; /* size of hash table - 1 */
@@ -91,6 +93,7 @@ procinit(void)
LIST_INIT(&zombprocess);
LIST_INIT(&allproc);
+ rw_init(&uidinfolk, "uidinfo");
tidhashtbl = hashinit(maxthread / 4, M_PROC, M_NOWAIT, &tidhash);
pidhashtbl = hashinit(maxprocess / 4, M_PROC, M_NOWAIT, &pidhash);
@@ -113,6 +116,10 @@ procinit(void)
PR_WAITOK, "sessionpl", NULL);
}
+/*
+ * This returns with `uidinfolk' held: caller must call uid_release()
+ * after making whatever change they needed.
+ */
struct uidinfo *
uid_find(uid_t uid)
{
@@ -120,16 +127,20 @@ uid_find(uid_t uid)
struct uihashhead *uipp;
uipp = UIHASH(uid);
+ rw_enter_write(&uidinfolk);
LIST_FOREACH(uip, uipp, ui_hash)
if (uip->ui_uid == uid)
break;
if (uip)
return (uip);
+ rw_exit_write(&uidinfolk);
nuip = malloc(sizeof(*nuip), M_PROC, M_WAITOK|M_ZERO);
+ rw_enter_write(&uidinfolk);
LIST_FOREACH(uip, uipp, ui_hash)
if (uip->ui_uid == uid)
break;
if (uip) {
+ /* XXX unlock uidinfolk across the free()? */
free(nuip, M_PROC, sizeof(*nuip));
return (uip);
}
@@ -139,6 +150,12 @@ uid_find(uid_t uid)
return (nuip);
}
+void
+uid_release(struct uidinfo *uip)
+{
+ rw_exit_write(&uidinfolk);
+}
+
/*
* Change the count associated with number of threads
* a given user is using.
@@ -147,12 +164,14 @@ int
chgproccnt(uid_t uid, int diff)
{
struct uidinfo *uip;
+ long count;
uip = uid_find(uid);
- uip->ui_proccnt += diff;
- if (uip->ui_proccnt < 0)
+ count = (uip->ui_proccnt += diff);
+ uid_release(uip);
+ if (count < 0)
panic("chgproccnt: procs < 0");
- return (uip->ui_proccnt);
+ return count;
}
/*
Index: kern/vfs_lockf.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_lockf.c,v
retrieving revision 1.24
diff -u -p -r1.24 vfs_lockf.c
--- kern/vfs_lockf.c 7 Nov 2016 00:26:33 -0000 1.24
+++ kern/vfs_lockf.c 21 Feb 2018 09:12:55 -0000
@@ -106,9 +106,12 @@ lf_alloc(uid_t uid, int allowfail)
uip = uid_find(uid);
if (uid && allowfail && uip->ui_lockcnt >
- (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2)))
+ (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
+ uid_release(uip);
return (NULL);
+ }
uip->ui_lockcnt++;
+ uid_release(uip);
lock = pool_get(&lockfpool, PR_WAITOK);
lock->lf_uid = uid;
return (lock);
@@ -121,6 +124,7 @@ lf_free(struct lockf *lock)
uip = uid_find(lock->lf_uid);
uip->ui_lockcnt--;
+ uid_release(uip);
pool_put(&lockfpool, lock);
}
Index: sys/proc.h
===================================================================
RCS file: /cvs/src/sys/sys/proc.h,v
retrieving revision 1.246
diff -u -p -r1.246 proc.h
--- sys/proc.h 20 Feb 2018 12:38:58 -0000 1.246
+++ sys/proc.h 21 Feb 2018 09:09:16 -0000
@@ -408,6 +408,7 @@ struct uidinfo {
};
struct uidinfo *uid_find(uid_t);
+void uid_release(struct uidinfo *);
/*
* We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,