Hi,
This is the spinlock change I proposed a few weeks ago, plus
the addition of Dag.
Changes :
o None
Jean
diff -u -p linux/include/net/irda/irqueue.d6.h linux/include/net/irda/irqueue.h
--- linux/include/net/irda/irqueue.d6.h Mon Jan 8 18:18:20 2001
+++ linux/include/net/irda/irqueue.h Tue Jan 9 12:06:52 2001
@@ -36,12 +36,14 @@
#define NAME_SIZE 32
/*
- * Hash types
+ * Hash types (some flags can be xored)
+ * See comments in irqueue.c for which one to use...
*/
-#define HB_NOLOCK 0
-#define HB_GLOBAL 1
-#define HB_LOCAL 2
-#define HB_SORTED 4
+#define HB_NOLOCK 0 /* No concurrent access prevention */
+#define HB_GLOBAL 1 /* Prevent concurrent write across processors */
+#define HB_LOCAL 2 /* Deprecated (not SMP compatible) */
+#define HB_SORTED 4 /* Not yet supported */
+#define HB_LOCK 8 /* Prevent concurrent write with global lock */
/*
* Hash defines
@@ -79,6 +81,8 @@ typedef struct hashbin_t {
irda_queue_t *hb_queue[HASHBIN_SIZE] ALIGN;
irda_queue_t* hb_current;
+
+ spinlock_t hb_spinlock; /* To be used by the user */
} hashbin_t;
hashbin_t *hashbin_new(int type);
diff -u -p linux/include/net/irda/irlmp.d6.h linux/include/net/irda/irlmp.h
--- linux/include/net/irda/irlmp.d6.h Tue Jan 9 09:49:35 2001
+++ linux/include/net/irda/irlmp.h Tue Jan 9 14:14:54 2001
@@ -176,7 +176,6 @@ struct irlmp_cb {
hashbin_t *services;
hashbin_t *cachelog; /* Current discovery log */
- spinlock_t log_lock; /* discovery log spinlock */
int running;
@@ -248,7 +247,8 @@ extern int sysctl_discovery_slots;
extern int sysctl_discovery;
extern struct irlmp_cb *irlmp;
-static inline hashbin_t *irlmp_get_cachelog(void) { return irlmp->cachelog; }
+/* This function should not exist, it's too dangerous... */
+//static inline hashbin_t *irlmp_get_cachelog(void) { return irlmp->cachelog; }
static inline int irlmp_get_lap_tx_queue_len(struct lsap_cb *self)
{
diff -u -p linux/net/irda/irqueue.d6.c linux/net/irda/irqueue.c
--- linux/net/irda/irqueue.d6.c Mon Jan 8 18:10:47 2001
+++ linux/net/irda/irqueue.c Tue Jan 9 14:57:13 2001
@@ -38,6 +38,75 @@
#include <net/irda/irqueue.h>
#include <net/irda/irmod.h>
+/*
+ * Notes on the concurrent access to hashbin and other SMP issues
+ * -------------------------------------------------------------
+ * Hashbins are very often in the IrDA stack a global repository of
+ * information, and therefore used in a very asynchronous manner following
+ * various events (driver calls, timers, user calls...).
+ * Therefore, very often it is highly important to consider the
+ * management of concurrent access to the hashbin and how to guarantee the
+ * consistency of the operations on it.
+ *
+ * If you define your hashbin with the flag HB_GLOBAL, the library
+ * will use spinlock to protect properly the following operations :
+ * o hashbin creation with hashbin_new()
+ * o inserting an element with hashbin_insert()
+ * o removing an element through hashbin_remove() & hashbin_remove_this()
+ * The following operations are inadequately protected :
+ * o hashbin_find()
+ * All other operations are unprotected, in particular :
+ * o hashbin_delete()
+ * o hashbin_get_first()/hashbin_get_next()
+ *
+ * In a lot of places in the IrDA stack, hashbins are defined with
+ * HB_LOCAL. This protects concurrent access on the local processor, which
+ * means not much on an SMP system.
+ *
+ * Anyway, there is a fundamental flaw in the way the locking operates,
+ * which makes it most often unusable, even with HB_GLOBAL. I'll explain...
+ *
+ * Let's take hashbin_find(). At first glance, the function seems fine,
+ * spinlock is grabbed at the entrance and released when exiting. What's wrong
+ * with this picture ?
+ * The function hashbin_find() returns a pointer to the entry found,
+ * so that the user can have a look at the entry after the call to
+ * hashbin_find(). However, while the user is doing so, he is no longer
+ * protected from a concurrent call to hashbin_remove().
+ * In other words, between the call to hashbin_find() and the time
+ * we process the result, the data may go away !!! Ouch !
+ *
+ * The process of hashbin_get_first()/hashbin_get_next() is in my
+ * view even more broken in that respect. Those functions use a global
+ * argument, 'hb_current', so you can't have two concurrent GET processes
+ * happening in parallel. In other words, you need to lock access from
+ * the initial hashbin_get_first() to the very last hashbin_get_next()...
+ * This means that even on uniprocessor, it's easy to get in trouble...
+ *
+ * Therefore, if you intend to "read" the data in the hashbin in
+ * one way or another (which is very likely), you need to manage the locking
+ * by yourself and not trust the locking in there.
+ * I've added a new flag, HB_LOCK, which uses a single global lock
+ * for the functions that are self-contained. For other functions, the user
+ * should lock the hashbin himself using 'hb_spinlock'... For an example on
+ * how to do it, please check discovery.c and irnet_irda.c which do the
+ * right stuff...
+ * It would probably be possible to use a less heavy handed approach,
+ * but I'd rather have things safe than fast...
+ *
+ * Quick summary :
+ * o If your hashbin is used in a single thread of execution, i.e.
+ * everything within a function and its subroutines or with a predetermined
+ * sequence (state machine, time events), use HB_NOLOCK and be happy.
+ * o If your hashbin is used in multiple threads of execution, use
+ * HB_LOCK and lock hashbin_find() and hashbin_get_*() ; or you can lock
+ * everything yourself.
+ * o Use HB_GLOBAL only if you know what you are doing.
+ * o Don't use HB_LOCAL. Anyway, I've deprecated it...
+ *
+ * Jean II, 8 January 01
+ */
+
static irda_queue_t *dequeue_general( irda_queue_t **queue, irda_queue_t* element);
static __u32 hash( char* name);
@@ -65,11 +134,21 @@ hashbin_t *hashbin_new(int type)
memset(hashbin, 0, sizeof(hashbin_t));
hashbin->hb_type = type;
hashbin->magic = HB_MAGIC;
+ hashbin->hb_current = NULL;
/* Make sure all spinlock's are unlocked */
- for (i=0;i<HASHBIN_SIZE;i++)
- hashbin->hb_mutex[i] = SPIN_LOCK_UNLOCKED;
-
+ if ( hashbin->hb_type & HB_GLOBAL ) {
+ for (i=0;i<HASHBIN_SIZE;i++)
+ spin_lock_init(&hashbin->hb_mutex[ i ]);
+ }
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_init(&hashbin->hb_spinlock);
+ }
+ if ( hashbin->hb_type & HB_LOCAL ) {
+ IRDA_DEBUG( 0, __FUNCTION__
+ "(), Warning : using an deprecated flag (HB_LOCAL)!\n");
+ }
+
return hashbin;
}
@@ -115,11 +194,17 @@ int hashbin_clear( hashbin_t* hashbin, F
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
irda_queue_t* queue;
+ unsigned long flags = 0;
int i;
ASSERT(hashbin != NULL, return -1;);
ASSERT(hashbin->magic == HB_MAGIC, return -1;);
+ /* Synchronize */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave( &hashbin->hb_spinlock, flags);
+ }
+
/*
* Free the entries in the hashbin, TODO: use hashbin_clear when
* it has been shown to work
@@ -134,10 +219,18 @@ int hashbin_delete( hashbin_t* hashbin,
}
}
+ /* Cleanup local data */
+ hashbin->hb_current = NULL;
+ hashbin->magic = ~HB_MAGIC;
+
+ /* Release lock */
+ if ( hashbin->hb_type & HB_LOCK) {
+ spin_unlock_irqrestore( &hashbin->hb_spinlock, flags);
+ }
+
/*
* Free the hashbin structure
*/
- hashbin->magic = ~HB_MAGIC;
kfree(hashbin);
return 0;
@@ -167,12 +260,12 @@ void hashbin_insert(hashbin_t* hashbin,
bin = GET_HASHBIN( hashv );
/* Synchronize */
- if ( hashbin->hb_type & HB_GLOBAL ) {
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave( &hashbin->hb_spinlock, flags);
+
+ } else if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
- } else if ( hashbin->hb_type & HB_LOCAL ) {
- save_flags(flags);
- cli();
} /* Default is no-lock */
/*
@@ -195,13 +288,13 @@ void hashbin_insert(hashbin_t* hashbin,
hashbin->hb_size++;
/* Release lock */
- if ( hashbin->hb_type & HB_GLOBAL) {
+ if ( hashbin->hb_type & HB_LOCK) {
+ spin_unlock_irqrestore( &hashbin->hb_spinlock, flags);
+ } else if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
- } else if ( hashbin->hb_type & HB_LOCAL) {
- restore_flags( flags);
- }
+ } /* Default is no-lock */
}
/*
@@ -229,12 +322,10 @@ void* hashbin_find( hashbin_t* hashbin,
bin = GET_HASHBIN( hashv );
/* Synchronize */
+ /* No lock here for HB_LOCK, the caller must do it himself */
if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
- } else if ( hashbin->hb_type & HB_LOCAL ) {
- save_flags(flags);
- cli();
} /* Default is no-lock */
/*
@@ -268,10 +359,12 @@ void* hashbin_find( hashbin_t* hashbin,
if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
- } else if ( hashbin->hb_type & HB_LOCAL) {
- restore_flags( flags);
- }
+ } /* Default is no-lock */
+ /* Note : passing entry to the caller is unsafe. The entry is still
+ * in the hashbin, so another caller can grab it as well.
+ * Jean II */
+
if ( found )
return entry;
else
@@ -321,12 +414,12 @@ void* hashbin_remove( hashbin_t* hashbin
bin = GET_HASHBIN( hashv );
/* Synchronize */
- if ( hashbin->hb_type & HB_GLOBAL ) {
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave( &hashbin->hb_spinlock, flags);
+
+ } else if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
- } else if ( hashbin->hb_type & HB_LOCAL ) {
- save_flags(flags);
- cli();
} /* Default is no-lock */
/*
@@ -374,14 +467,18 @@ void* hashbin_remove( hashbin_t* hashbin
}
/* Release lock */
- if ( hashbin->hb_type & HB_GLOBAL) {
+ if ( hashbin->hb_type & HB_LOCK) {
+ spin_unlock_irqrestore( &hashbin->hb_spinlock, flags);
+
+ } else if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
- } else if ( hashbin->hb_type & HB_LOCAL) {
- restore_flags( flags);
- }
+ } /* Default is no-lock */
-
+ /* Note : we can safely pass entry to the caller, because as it is no
+ * longer in the hashbin, he will be the only owner of it
+ * Jean II */
+
/* Return */
if ( found )
return entry;
@@ -391,7 +488,7 @@ void* hashbin_remove( hashbin_t* hashbin
}
/*
- * Function hashbin_remove (hashbin, hashv, name)
+ * Function hashbin_remove_this (hashbin, entry)
*
* Remove entry with the given name
*
@@ -413,6 +510,11 @@ void* hashbin_remove_this( hashbin_t* ha
ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
ASSERT( entry != NULL, return NULL;);
+ /* Synchronize */
+ if ( hashbin->hb_type & HB_LOCK ) {
+ spin_lock_irqsave( &hashbin->hb_spinlock, flags);
+ }
+
/* Check if valid and not already removed... */
if((entry->q_next == NULL) || (entry->q_prev == NULL))
return NULL;
@@ -427,9 +529,6 @@ void* hashbin_remove_this( hashbin_t* ha
if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
- } else if ( hashbin->hb_type & HB_LOCAL ) {
- save_flags(flags);
- cli();
} /* Default is no-lock */
/*
@@ -449,12 +548,13 @@ void* hashbin_remove_this( hashbin_t* ha
hashbin->hb_current = NULL;
/* Release lock */
- if ( hashbin->hb_type & HB_GLOBAL) {
+ if ( hashbin->hb_type & HB_LOCK) {
+ spin_unlock_irqrestore( &hashbin->hb_spinlock, flags);
+
+ } else if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
- } else if ( hashbin->hb_type & HB_LOCAL) {
- restore_flags( flags);
- }
+ } /* Default is no-lock */
return entry;
}
diff -u -p linux/net/irda/irlmp.d6.c linux/net/irda/irlmp.c
--- linux/net/irda/irlmp.d6.c Tue Jan 9 09:37:29 2001
+++ linux/net/irda/irlmp.c Tue Jan 9 10:20:38 2001
@@ -82,13 +82,13 @@ int __init irlmp_init(void)
memset(irlmp, 0, sizeof(struct irlmp_cb));
irlmp->magic = LMP_MAGIC;
- spin_lock_init(&irlmp->log_lock);
irlmp->clients = hashbin_new(HB_GLOBAL);
irlmp->services = hashbin_new(HB_GLOBAL);
irlmp->links = hashbin_new(HB_GLOBAL);
irlmp->unconnected_lsaps = hashbin_new(HB_GLOBAL);
- irlmp->cachelog = hashbin_new(HB_GLOBAL);
+ irlmp->cachelog = hashbin_new(HB_NOLOCK);
+ spin_lock_init(&irlmp->cachelog->hb_spinlock);
irlmp->free_lsap_sel = 0x10; /* Reserved 0x00-0x0f */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
@@ -336,7 +336,6 @@ int irlmp_connect_request(struct lsap_cb
struct sk_buff *skb = NULL;
struct lap_cb *lap;
struct lsap_cb *lsap;
- discovery_t *discovery;
ASSERT(self != NULL, return -EBADR;);
ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;);
@@ -377,6 +376,10 @@ int irlmp_connect_request(struct lsap_cb
* device with the given daddr
*/
if (!saddr) {
+ discovery_t *discovery;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irlmp->cachelog->hb_spinlock, flags);
if (daddr != DEV_ADDR_ANY)
discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
else {
@@ -389,6 +392,7 @@ int irlmp_connect_request(struct lsap_cb
saddr = discovery->saddr;
daddr = discovery->daddr;
}
+ spin_unlock_irqrestore(&irlmp->cachelog->hb_spinlock, flags);
}
lap = hashbin_find(irlmp->links, saddr, NULL);
if (lap == NULL) {
diff -u -p linux/net/irda/discovery.d6.c linux/net/irda/discovery.c
--- linux/net/irda/discovery.d6.c Tue Jan 9 09:35:39 2001
+++ linux/net/irda/discovery.c Tue Jan 9 10:06:29 2001
@@ -61,7 +61,7 @@ void irlmp_add_discovery(hashbin_t *cach
/* Set time of first discovery if node is new (see below) */
new->first_timestamp = new->timestamp;
- spin_lock_irqsave(&irlmp->log_lock, flags);
+ spin_lock_irqsave(&cachelog->hb_spinlock, flags);
/*
* Remove all discoveries of devices that has previously been
@@ -95,7 +95,7 @@ void irlmp_add_discovery(hashbin_t *cach
/* Insert the new and updated version */
hashbin_insert(cachelog, (irda_queue_t *) new, new->daddr, NULL);
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
}
/*
@@ -146,7 +146,7 @@ void irlmp_expire_discoveries(hashbin_t
IRDA_DEBUG(4, __FUNCTION__ "()\n");
- spin_lock_irqsave(&irlmp->log_lock, flags);
+ spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
@@ -169,7 +169,7 @@ void irlmp_expire_discoveries(hashbin_t
}
}
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
}
/*
@@ -230,13 +230,13 @@ struct irda_device_info *irlmp_copy_disc
return NULL;
/* Save spin lock - spinlock should be discovery specific */
- spin_lock_irqsave(&irlmp->log_lock, flags);
+ spin_lock_irqsave(&log->hb_spinlock, flags);
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
if (buffer == NULL) {
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
return NULL;
}
@@ -257,7 +257,7 @@ struct irda_device_info *irlmp_copy_disc
discovery = (discovery_t *) hashbin_get_next(log);
}
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&log->hb_spinlock, flags);
/* Get the actual number of device in the buffer and return */
*pn = i;
@@ -276,7 +276,7 @@ __u32 irlmp_find_device(hashbin_t *cache
unsigned long flags;
discovery_t *d;
- spin_lock_irqsave(&irlmp->log_lock, flags);
+ spin_lock_irqsave(&cachelog->hb_spinlock, flags);
/* Look at all discoveries for that link */
d = (discovery_t *) hashbin_get_first(cachelog);
@@ -288,13 +288,13 @@ __u32 irlmp_find_device(hashbin_t *cache
if (strcmp(name, d->nickname) == 0) {
*saddr = d->saddr;
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
return d->daddr;
}
d = (discovery_t *) hashbin_get_next(cachelog);
}
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
return 0;
}
@@ -310,7 +310,7 @@ int discovery_proc_read(char *buf, char
{
discovery_t *discovery;
unsigned long flags;
- hashbin_t *cachelog = irlmp_get_cachelog();
+ hashbin_t *cachelog = irlmp->cachelog;
int len = 0;
if (!irlmp)
@@ -318,7 +318,7 @@ int discovery_proc_read(char *buf, char
len = sprintf(buf, "IrLMP: Discovery log:\n\n");
- spin_lock_irqsave(&irlmp->log_lock, flags);
+ spin_lock_irqsave(&cachelog->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(cachelog);
while (( discovery != NULL) && (len < length)) {
@@ -362,7 +362,7 @@ int discovery_proc_read(char *buf, char
discovery = (discovery_t *) hashbin_get_next(cachelog);
}
- spin_unlock_irqrestore(&irlmp->log_lock, flags);
+ spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
return len;
}
diff -u -p linux/net/irda/iriap.d6.c linux/net/irda/iriap.c
--- linux/net/irda/iriap.d6.c Tue Jan 9 11:23:50 2001
+++ linux/net/irda/iriap.c Tue Jan 9 11:40:42 2001
@@ -88,12 +88,13 @@ int __init iriap_init(void)
__u8 oct_seq[6];
__u16 hints;
- /* Allocate master array */
- iriap = hashbin_new(HB_LOCAL);
+ /* Allocate master array - HB_GLOBAL is plenty good enough */
+ iriap = hashbin_new(HB_GLOBAL);
if (!iriap)
return -ENOMEM;
- objects = hashbin_new(HB_LOCAL);
+ /* Object repository - defined in irias_object.c */
+ objects = hashbin_new(HB_LOCK);
if (!objects) {
WARNING(__FUNCTION__ "(), Can't allocate objects hashbin!\n");
return -ENOMEM;
@@ -968,24 +969,22 @@ static char *ias_value_types[] = {
"IAS_STRING"
};
-int irias_proc_read(char *buf, char **start, off_t offset, int len)
+int irias_proc_read(char *buf, char **start, off_t offset, int length)
{
struct ias_object *obj;
struct ias_attrib *attrib;
unsigned long flags;
+ int len = 0;
ASSERT( objects != NULL, return 0;);
- save_flags( flags);
- cli();
+ spin_lock_irqsave(&objects->hb_spinlock, flags);
- len = 0;
-
- len += sprintf(buf+len, "LM-IAS Objects:\n");
+ len = sprintf(buf+len, "LM-IAS Objects:\n");
/* List all objects */
obj = (struct ias_object *) hashbin_get_first(objects);
- while ( obj != NULL) {
+ while (( obj != NULL) && (len < length)) {
ASSERT(obj->magic == IAS_OBJECT_MAGIC, return 0;);
len += sprintf(buf+len, "name: %s, ", obj->name);
@@ -1030,7 +1029,7 @@ int irias_proc_read(char *buf, char **st
}
obj = (struct ias_object *) hashbin_get_next(objects);
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
return len;
}
diff -u -p linux/net/irda/irlap.j1.c linux/net/irda/irlap.c
--- linux/net/irda/irlap.j1.c Tue Jan 9 10:32:52 2001
+++ linux/net/irda/irlap.c Tue Jan 9 10:44:11 2001
@@ -72,14 +72,14 @@ int irlap_proc_read(char *, char **, off
int __init irlap_init(void)
{
/* Allocate master array */
- irlap = hashbin_new(HB_LOCAL);
+ irlap = hashbin_new(HB_GLOBAL);
if (irlap == NULL) {
ERROR(__FUNCTION__ "(), can't allocate irlap hashbin!\n");
return -ENOMEM;
}
#ifdef CONFIG_IRDA_COMPRESSION
- irlap_compressors = hashbin_new(HB_LOCAL);
+ irlap_compressors = hashbin_new(HB_GLOBAL);
if (irlap_compressors == NULL) {
WARNING(__FUNCTION__
"(), can't allocate compressors hashbin!\n");
@@ -537,8 +537,9 @@ void irlap_discovery_request(struct irla
hashbin_delete(self->discovery_log, (FREE_FUNC) kfree);
self->discovery_log = NULL;
}
-
- self->discovery_log= hashbin_new(HB_LOCAL);
+
+ /* All operations will occur at a predictable time, no need to lock */
+ self->discovery_log= hashbin_new(HB_NOLOCK);
info.S = discovery->nslots; /* Number of slots */
info.s = 0; /* Current slot */
diff -u -p linux/net/irda/irttp.j1.c linux/net/irda/irttp.c
--- linux/net/irda/irttp.j1.c Tue Jan 9 10:44:49 2001
+++ linux/net/irda/irttp.c Tue Jan 9 13:39:20 2001
@@ -89,7 +89,7 @@ int __init irttp_init(void)
irttp->magic = TTP_MAGIC;
- irttp->tsaps = hashbin_new(HB_LOCAL);
+ irttp->tsaps = hashbin_new(HB_LOCK);
if (!irttp->tsaps) {
ERROR(__FUNCTION__ "(), can't allocate IrTTP hashbin!\n");
return -ENOMEM;
@@ -1017,30 +1017,43 @@ int irttp_connect_response(struct tsap_c
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
struct tsap_cb *new;
+ unsigned long flags;
IRDA_DEBUG(1, __FUNCTION__ "()\n");
- if (!hashbin_find(irttp->tsaps, (int) orig, NULL)) {
- IRDA_DEBUG(0, __FUNCTION__ "(), unable to find TSAP\n");
- return NULL;
- }
+ /* Allocate a new instance */
new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
if (!new) {
IRDA_DEBUG(0, __FUNCTION__ "(), unable to kmalloc\n");
return NULL;
}
+
+ /* Protect our access to the old tsap instance */
+ spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
+
+ /* Find the old instance */
+ if (!hashbin_find(irttp->tsaps, (int) orig, NULL)) {
+ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
+ kfree(new);
+ IRDA_DEBUG(0, __FUNCTION__ "(), unable to find TSAP\n");
+ return NULL;
+ }
/* Dup */
memcpy(new, orig, sizeof(struct tsap_cb));
- new->notify.instance = instance;
- new->lsap = irlmp_dup(orig->lsap, new);
+
+ /* We don't need the old instance any more */
+ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
/* Not everything should be copied */
+ new->notify.instance = instance;
+ new->lsap = irlmp_dup(orig->lsap, new);
init_timer(&new->todo_timer);
skb_queue_head_init(&new->rx_queue);
skb_queue_head_init(&new->tx_queue);
skb_queue_head_init(&new->rx_fragments);
+ /* This is locked */
hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (int) new, NULL);
return new;
@@ -1542,23 +1555,23 @@ static void irttp_start_todo_timer(struc
*
* Give some info to the /proc file system
*/
-int irttp_proc_read(char *buf, char **start, off_t offset, int len)
+int irttp_proc_read(char *buf, char **start, off_t offset, int length)
{
struct tsap_cb *self;
unsigned long flags;
int i = 0;
+ int len = 0;
ASSERT(irttp != NULL, return 0;);
- len = 0;
-
- save_flags(flags);
- cli();
+ /* Protect our access to the tsap list */
+ spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
- while (self != NULL) {
- if (!self || self->magic != TTP_TSAP_MAGIC)
- return len;
+ while ((self != NULL) && (len < length)) {
+
+ if ((!self) || (self->magic != TTP_TSAP_MAGIC))
+ goto out;
len += sprintf(buf+len, "TSAP %d, ", i++);
len += sprintf(buf+len, "stsap_sel: %02x, ",
@@ -1599,7 +1612,9 @@ int irttp_proc_read(char *buf, char **st
self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps);
}
- restore_flags(flags);
+
+out:
+ spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return len;
}
diff -u -p linux/net/irda/irias_object.j1.c linux/net/irda/irias_object.c
--- linux/net/irda/irias_object.j1.c Tue Jan 9 11:29:50 2001
+++ linux/net/irda/irias_object.c Tue Jan 9 12:04:44 2001
@@ -84,7 +84,9 @@ struct ias_object *irias_new_object( cha
obj->name = strdup( name);
obj->id = id;
- obj->attribs = hashbin_new(HB_LOCAL);
+ /* We use the master lock objects->hb_spinlock, so we don't need
+ * to lock those guys */
+ obj->attribs = hashbin_new(HB_NOLOCK);
return obj;
}
@@ -157,13 +159,16 @@ int irias_delete_object(struct ias_objec
int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib)
{
struct ias_attrib *node;
+ unsigned long flags;
ASSERT(obj != NULL, return -1;);
ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -1;);
ASSERT(attrib != NULL, return -1;);
/* Remove atribute from object */
+ spin_lock_irqsave(&objects->hb_spinlock, flags);
node = hashbin_remove(obj->attribs, 0, attrib->name);
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
if (!node)
return 0; /* Already removed or non-existent */
@@ -200,9 +205,16 @@ void irias_insert_object(struct ias_obje
*/
struct ias_object *irias_find_object(char *name)
{
+ unsigned long flags;
+ struct ias_object *entry;
ASSERT(name != NULL, return NULL;);
- return hashbin_find(objects, 0, name);
+ spin_lock_irqsave(&objects->hb_spinlock, flags);
+ entry = hashbin_find(objects, 0, name);
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
+
+ /* This is unsafe, but what should we do ? - Jean II */
+ return(entry);
}
/*
@@ -213,16 +225,20 @@ struct ias_object *irias_find_object(cha
*/
struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name)
{
+ unsigned long flags;
struct ias_attrib *attrib;
ASSERT(obj != NULL, return NULL;);
ASSERT(obj->magic == IAS_OBJECT_MAGIC, return NULL;);
ASSERT(name != NULL, return NULL;);
+ spin_lock_irqsave(&objects->hb_spinlock, flags);
attrib = hashbin_find(obj->attribs, 0, name);
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
if (attrib == NULL)
return NULL;
+ /* This is unsafe, but what should we do ? - Jean II */
return attrib;
}
@@ -235,6 +251,8 @@ struct ias_attrib *irias_find_attrib(str
void irias_add_attrib( struct ias_object *obj, struct ias_attrib *attrib,
int owner)
{
+ unsigned long flags;
+
ASSERT(obj != NULL, return;);
ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
@@ -244,7 +262,9 @@ void irias_add_attrib( struct ias_object
/* Set if attrib is owned by kernel or user space */
attrib->value->owner = owner;
+ spin_lock_irqsave(&objects->hb_spinlock, flags);
hashbin_insert(obj->attribs, (irda_queue_t *) attrib, 0, attrib->name);
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
}
/*
@@ -258,10 +278,15 @@ int irias_object_change_attribute(char *
{
struct ias_object *obj;
struct ias_attrib *attrib;
+ unsigned long flags;
+
+ /* Protect ourselves */
+ spin_lock_irqsave(&objects->hb_spinlock, flags);
/* Find object */
obj = hashbin_find(objects, 0, obj_name);
if (obj == NULL) {
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
WARNING(__FUNCTION__ "(), Unable to find object: %s\n",
obj_name);
return -1;
@@ -270,12 +295,14 @@ int irias_object_change_attribute(char *
/* Find attribute */
attrib = hashbin_find(obj->attribs, 0, attrib_name);
if (attrib == NULL) {
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
WARNING(__FUNCTION__ "(), Unable to find attribute: %s\n",
attrib_name);
return -1;
}
if ( attrib->value->type != new_value->type) {
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
IRDA_DEBUG( 0, __FUNCTION__
"(), changing value type not allowed!\n");
return -1;
@@ -286,6 +313,9 @@ int irias_object_change_attribute(char *
/* Insert new value */
attrib->value = new_value;
+
+ /* Finished */
+ spin_unlock_irqrestore(&objects->hb_spinlock, flags);
/* Success */
return 0;
diff -u -p linux/drivers/net/irda/irtty.j1.c linux/drivers/net/irda/irtty.c
--- linux/drivers/net/irda/irtty.j1.c Tue Jan 9 11:58:04 2001
+++ linux/drivers/net/irda/irtty.c Tue Jan 9 11:58:38 2001
@@ -77,7 +77,7 @@ int __init irtty_init(void)
{
int status;
- irtty = hashbin_new( HB_LOCAL);
+ irtty = hashbin_new( HB_GLOBAL); /* HB_GLOBAL is good enough */
if ( irtty == NULL) {
printk( KERN_WARNING "IrDA: Can't allocate irtty hashbin!\n");
return -ENOMEM;
diff -urpN linux-2.4.1-pre8/net/irda/ircomm/ircomm_core.c
linux-2.4.1-pre8-irda-patch/net/irda/ircomm/ircomm_core.c
--- linux-2.4.1-pre8/net/irda/ircomm/ircomm_core.c Tue Nov 28 03:07:31 2000
+++ linux-2.4.1-pre8-irda-patch/net/irda/ircomm/ircomm_core.c Mon Jan 22 00:54:42
+2001
@@ -61,7 +61,7 @@ hashbin_t *ircomm = NULL;
int __init ircomm_init(void)
{
- ircomm = hashbin_new(HB_LOCAL);
+ ircomm = hashbin_new(HB_LOCK);
if (ircomm == NULL) {
ERROR(__FUNCTION__ "(), can't allocate hashbin!\n");
return -ENOMEM;
diff -urpN linux-2.4.1-pre8/net/irda/ircomm/ircomm_tty.c
linux-2.4.1-pre8-irda-patch/net/irda/ircomm/ircomm_tty.c
--- linux-2.4.1-pre8/net/irda/ircomm/ircomm_tty.c Tue Nov 28 03:07:31 2000
+++ linux-2.4.1-pre8-irda-patch/net/irda/ircomm/ircomm_tty.c Mon Jan 22 00:54:43
+2001
@@ -91,7 +91,7 @@ hashbin_t *ircomm_tty = NULL;
*/
int __init ircomm_tty_init(void)
{
- ircomm_tty = hashbin_new(HB_LOCAL);
+ ircomm_tty = hashbin_new(HB_LOCK);
if (ircomm_tty == NULL) {
ERROR(__FUNCTION__ "(), can't allocate hashbin!\n");
return -ENOMEM;
diff -urpN linux-2.4.1-pre8/net/irda/irlan/irlan_common.c
linux-2.4.1-pre8-irda-patch/net/irda/irlan/irlan_common.c
--- linux-2.4.1-pre8/net/irda/irlan/irlan_common.c Sun Nov 12 03:11:23 2000
+++ linux-2.4.1-pre8-irda-patch/net/irda/irlan/irlan_common.c Mon Jan 22 00:54:44
+2001
@@ -123,8 +123,9 @@ int __init irlan_init(void)
__u16 hints;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
+
/* Allocate master structure */
- irlan = hashbin_new(HB_LOCAL);
+ irlan = hashbin_new(HB_LOCK);
if (irlan == NULL) {
printk(KERN_WARNING "IrLAN: Can't allocate hashbin!\n");
return -ENOMEM;