On Thu, 2005-07-07 at 22:19 -0700, David S. Miller wrote:
> I got inspired earlier today, and found that it seemed
> it might be easy to kill off the 'list' member of
> struct sk_buff without changing sk_buff_head at all.
>
> I got very far. Nearly every single piece of code was
> easy to change to pass in an explicit SKB list instead
> of skb->list to the SKB queue management functions.
>
> The big exception was SCTP. I can't believe after being
> in the kernel for several years it has all of this complicated
> list handling, SKB structure overlaying, and casting all over
> the place. It was a big downer after a very positive day of
> coding.
>
> First, it casts "struct sctp_chunk *" pointers to
> "struct sk_buff *" so that it can "borrow" the SKB list
> handling functions. I just copied over the skb_*() routines
> it used in this way to be sctp_chunk_*(), and used them
> throughout and eliminated the ugly casts. This can be
> simplified a lot further, since it really doesn't care about
> 'qlen'. In fact, what it wants is just the most basic list
> handling, a la linux/list.h. So just sticking a list_head
> into sctp_chunk and replacing sctp_chunk_list with a list_head
> as well should do the trick.
I guess we could use the generic lists rather than the skb lists. But
your sctp_chunk_list looks fine for now, except for a minor bug in
__sctp_chunk_dequeue(): you missed resetting result->list to NULL.
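For reference, the generic-list variant would look roughly like this
(a sketch only; the member placement and the helper name are
illustrative, not from your patch). Note that list_del_init() leaves
the node self-linked, so the whole class of missed result->list
resets goes away:

#include <linux/list.h>

struct sctp_chunk {
	struct list_head list;	/* replaces the hand-rolled next/prev */
	/* ... existing members unchanged ... */
};

/* sctp_chunk_list collapses to a bare list_head on the owner. */
static inline struct sctp_chunk *sctp_chunk_dequeue(struct list_head *head)
{
	struct sctp_chunk *chunk = NULL;

	if (!list_empty(head)) {
		chunk = list_entry(head->next, struct sctp_chunk, list);
		list_del_init(&chunk->list);
	}
	return chunk;
}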
>
> Some of the rest of the SCTP stuff was transformable with not
> too much effort.
>
> But then I really got stymied by the reassembly and partial queue
> handling. These SCTP ulp event things add a layer of abstraction at
> the skb_unlink() point, such that you can't know what list the SKB is
> on. One way to deal with this is to store the list pointer in
> the event struct, and that's likely what will happen at first. This
> isn't trivial because you have to make sure the assignment is done
> at all of the receive packet list insertion points, some places
> even use sk_buff_head lists on the local stack making this chore
> even more "exciting" :(
Storing the list pointer in struct ulpevent seems to be the simplest
way to fix this. But the ulpevent is embedded in the cb[40] field of
struct sk_buff, and the size of the ulpevent structure is already 34
bytes on 64-bit systems, so adding a pointer grows it to 42 bytes and
overflows the array. I am not sure you are OK with growing the cb
array to 42 bytes, considering that you are trying to reduce the size
of sk_buff.
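For context, the event is just an overlay on the skb's control
buffer, so the limit is a compile-time one. Something like this shows
the constraint (a sketch; sctp_skb2event() is the existing helper,
and the BUILD_BUG_ON is only illustrative and would have to sit
inside a function):

/* struct sk_buff declares "char cb[40];"; the event lives inside it. */
static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
{
	return (struct sctp_ulpevent *)skb->cb;
}

/* 34 bytes plus the new 8-byte pointer exceeds the 40-byte cb on
 * 64-bit builds, so a guard like this would trip there:
 */
BUILD_BUG_ON(sizeof(struct sctp_ulpevent) >
	     sizeof(((struct sk_buff *)0)->cb));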
Anyway, the following patch on top of yours makes SCTP build and work
without skb->list on 32-bit systems.
Thanks
Sridhar
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -719,6 +719,7 @@ static inline struct sctp_chunk *__sctp_
next->prev = prev;
prev->next = next;
result->next = result->prev = NULL;
+ result->list = NULL;
}
return result;
}
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -63,6 +63,7 @@ struct sctp_ulpevent {
__u32 cumtsn;
int msg_flags;
int iif;
+ struct sk_buff_head *list;
};
/* Retrieve the skb this event sits inside of. */
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -121,6 +121,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
+ event->list = &temp;
event = sctp_ulpq_order(ulpq, event);
}
@@ -197,10 +198,12 @@ int sctp_ulpq_tail_event(struct sctp_ulp
/* If we are harvesting multiple skbs they will be
* collected on a list.
*/
- if (sctp_event2skb(event)->list)
- sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
- else
+ if (event->list)
+ sctp_skb_list_tail(event->list, queue);
+ else {
__skb_queue_tail(queue, sctp_event2skb(event));
+ event->list = queue;
+ }
/* Did we just complete partial delivery and need to get
* rolling again? Move pending data to the receive
@@ -214,8 +217,8 @@ int sctp_ulpq_tail_event(struct sctp_ulp
return 1;
out_free:
- if (sctp_event2skb(event)->list)
- sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+ if (event->list)
+ sctp_queue_purge_ulpevents(event->list);
else
sctp_ulpevent_free(event);
return 0;
@@ -237,6 +240,7 @@ static inline void sctp_ulpq_store_reasm
pos = skb_peek_tail(&ulpq->reasm);
if (!pos) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ event->list = &ulpq->reasm;
return;
}
@@ -245,6 +249,7 @@ static inline void sctp_ulpq_store_reasm
ctsn = cevent->tsn;
if (TSN_lt(ctsn, tsn)) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ event->list = &ulpq->reasm;
return;
}
@@ -259,6 +264,7 @@ static inline void sctp_ulpq_store_reasm
/* Insert before pos. */
__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
+ event->list = &ulpq->reasm;
}
@@ -295,6 +301,7 @@ static struct sctp_ulpevent *sctp_make_r
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, &ulpq->reasm);
+ sctp_skb2event(f_frag)->list = NULL;
while (pos) {
pnext = pos->next;
@@ -305,6 +312,7 @@ static struct sctp_ulpevent *sctp_make_r
/* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, &ulpq->reasm);
+ sctp_skb2event(pos)->list = NULL;
/* Break if we have reached the last fragment. */
if (pos == l_frag)
@@ -568,9 +576,11 @@ static inline void sctp_ulpq_retrieve_or
sctp_ssn_next(in, sid);
__skb_unlink(pos, &ulpq->lobby);
+ sctp_skb2event(pos)->list = NULL;
/* Attach all gathered skbs to the event. */
- __skb_queue_tail(sctp_event2skb(event)->list, pos);
+ __skb_queue_tail(event->list, pos);
+ sctp_skb2event(pos)->list = event->list;
}
}
@@ -586,6 +596,7 @@ static inline void sctp_ulpq_store_order
pos = skb_peek_tail(&ulpq->lobby);
if (!pos) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ event->list = &ulpq->lobby;
return;
}
@@ -597,11 +608,13 @@ static inline void sctp_ulpq_store_order
cssn = cevent->ssn;
if (sid > csid) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ event->list = &ulpq->lobby;
return;
}
if ((sid == csid) && SSN_lt(cssn, ssn)) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ event->list = &ulpq->lobby;
return;
}
@@ -622,6 +635,7 @@ static inline void sctp_ulpq_store_order
/* Insert before pos. */
__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
+ event->list = &ulpq->lobby;
}
@@ -687,14 +701,17 @@ static inline void sctp_ulpq_reap_ordere
sctp_ssn_next(in, csid);
__skb_unlink(pos, &ulpq->lobby);
+ sctp_skb2event(pos)->list = NULL;
if (!event) {
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
+ event->list = &temp;
} else {
/* Attach all gathered skbs to the event. */
__skb_queue_tail(&temp, pos);
+ sctp_skb2event(pos)->list = &temp;
}
}
@@ -855,8 +872,10 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
gfp);
- if (ev)
+ if (ev) {
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+ ev->list = &sk->sk_receive_queue;
+ }
/* If there is data waiting, send it up the socket now. */
if (sctp_ulpq_clear_pd(ulpq) || ev)
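The pattern in the patch is mechanical: every __skb_queue_tail() or
__skb_insert() of an event's skb is paired with an event->list
assignment, and every __skb_unlink() with a reset. A pair of wrappers
along these lines could enforce the pairing and shrink the patch (a
sketch, not part of the patch; the names are made up, and the
__skb_insert() sites would need a similar helper):

static inline void sctp_ulpq_link_event(struct sk_buff_head *list,
					struct sctp_ulpevent *event)
{
	__skb_queue_tail(list, sctp_event2skb(event));
	event->list = list;
}

static inline void sctp_ulpq_unlink_event(struct sctp_ulpevent *event)
{
	__skb_unlink(sctp_event2skb(event), event->list);
	event->list = NULL;
}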