Common subdirectories: wap/CVS and ../gateway-cvs/wap/CVS
diff -ub wap/wap.c ../gateway-cvs/wap/wap.c
--- wap/wap.c	Mon Nov 20 11:55:54 2000
+++ ../gateway-cvs/wap/wap.c	Tue Oct 15 05:33:32 2002
@@ -10,11 +10,12 @@
 
 void wap_dispatch_datagram(WAPEvent *dgram)
 {
+    debug("wap.wap", 0, "XXXXXXX: wap_dispatch_datagram");
+
     gw_assert(dgram != NULL);
 
     if (dgram->type != T_DUnitdata_Ind) {
 	warning(0, "wap_dispatch_datagram got event of unexpected type.");
-	wap_event_dump(dgram);
 	wap_event_destroy(dgram);
         return;
     }
@@ -32,6 +33,9 @@
 	    WAPEvent *event;
 
 	    event = list_extract_first(events);
+	    debug("wap.wap", 0, "XXXXXXX: wap_dispatch_datagram <---------------");
+	    wap_event_dump(event);
+
             if (wtp_event_is_for_responder(event))
                 wtp_resp_dispatch_event(event);
             else
diff -ub wap/wap_events.def ../gateway-cvs/wap/wap_events.def
--- wap/wap_events.def	Thu Aug 22 10:31:18 2002
+++ ../gateway-cvs/wap/wap_events.def	Sun Oct 27 23:12:03 2002
@@ -458,6 +458,25 @@
 	INTEGER(tid)
 	INTEGER(tid_ok)
 	INTEGER(rid)
+	INTEGER(psnr)
+	ADDRTUPLE(addr_tuple)
+	)
+
+WAPEVENT(RcvNack, "RcvNack",
+	INTEGER(tid)
+	INTEGER(rid)
+	INTEGER(nmissing)
+	OPTIONAL_OCTSTR(missing)
+	ADDRTUPLE(addr_tuple)
+	)
+
+WAPEVENT(RcvSegmInvoke, "RcvSegmInvoke",
+        OCTSTR(user_data)
+        INTEGER(tid)
+        INTEGER(gtr)
+        INTEGER(ttr)
+	INTEGER(psn)
+	INTEGER(rid)
 	ADDRTUPLE(addr_tuple)
 	)
 
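
Note on the wap_events.def hunk above: the .def file is consumed through the X-macro pattern, so the new RcvNack and RcvSegmInvoke entries automatically pick up an enum tag, a struct, a constructor and a dump routine in every file that includes it. A minimal, self-contained sketch of the idea (the expansions and field types below are illustrative, not Kannel's actual wap_events.h output):

    #include <stdio.h>

    /* One entry per event; field macros nest inside the entry. */
    #define EVENT_LIST \
        WAPEVENT(RcvNack, "RcvNack", INTEGER(tid) INTEGER(nmissing))

    /* Expansion 1: an enum of event type tags. */
    #define WAPEVENT(name, str, fields) name,
    #define INTEGER(name)
    enum event_type { EVENT_LIST event_type_count };
    #undef WAPEVENT
    #undef INTEGER

    /* Expansion 2: one struct per event, a long per INTEGER field. */
    #define WAPEVENT(name, str, fields) struct name { fields };
    #define INTEGER(name) long name;
    EVENT_LIST
    #undef WAPEVENT
    #undef INTEGER

    int main(void)
    {
        struct RcvNack e = { 42, 0 };
        printf("RcvNack tag=%d tid=%ld\n", (int) RcvNack, e.tid);
        return 0;
    }
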
diff -ub wap/wsp_pdu.c ../gateway-cvs/wap/wsp_pdu.c
--- wap/wsp_pdu.c	Mon Nov 20 11:55:54 2000
+++ ../gateway-cvs/wap/wsp_pdu.c	Sun Oct 27 23:12:04 2002
@@ -123,6 +123,8 @@
 	WSP_PDU *pdu = NULL;
 	long bitpos = 0;
 
+	debug("wap.wsp", 0, "XXXXXXX: wsp_pdu_unpack");
+
 	gw_assert(data != NULL);
 
 	pdu = gw_malloc(sizeof(*pdu));
diff -ub wap/wsp_session.c ../gateway-cvs/wap/wsp_session.c
--- wap/wsp_session.c	Tue May 14 04:05:08 2002
+++ ../gateway-cvs/wap/wsp_session.c	Sun Oct 27 23:12:04 2002
@@ -167,6 +167,9 @@
 
 
 void wsp_session_dispatch_event(WAPEvent *event) {
+
+	debug("wap.wsp", 0, "XXXXXXX: wsp_session_dispatch_event");
+
 	wap_event_assert(event);
 	list_produce(queue, event);
 }
diff -ub wap/wtp.c ../gateway-cvs/wap/wtp.c
--- wap/wtp.c	Thu Aug 22 10:31:18 2002
+++ ../gateway-cvs/wap/wtp.c	Sun Oct 27 23:12:04 2002
@@ -29,6 +29,7 @@
 static WAPEvent *unpack_result(WTP_PDU *pdu, WAPAddrTuple *addr_tuple);
 static WAPEvent *unpack_ack(WTP_PDU *pdu, WAPAddrTuple *addr_tuple);
 static WAPEvent *unpack_abort(WTP_PDU *pdu, WAPAddrTuple *addr_tuple);
+static WAPEvent *unpack_segmented_invoke(WTP_PDU *pdu, WAPAddrTuple *addr_tuple);
 static WAPEvent *pack_error(WAPEvent *datagram);
 
 /******************************************************************************
@@ -45,6 +46,8 @@
      Octstr *data = NULL;
      long pdu_len;
 
+     debug("wap.wtp", 0, "XXXXXXX: wtp_unpack_wdp_datagram");
+
      gw_assert(datagram->type == T_DUnitdata_Ind);
 
      events = list_create();
@@ -97,17 +100,25 @@
 int wtp_event_is_for_responder(WAPEvent *event)
 {
 
+     debug("wap.wtp", 0, "XXXXXXX: wtp_event_is_for_responder");
+
      switch(event->type){
           
      case RcvInvoke:
          return event->u.RcvInvoke.tid < INITIATOR_TID_LIMIT;
 
+     case RcvSegmInvoke:
+         return event->u.RcvSegmInvoke.tid < INITIATOR_TID_LIMIT;
+
      case RcvResult:
          return event->u.RcvResult.tid < INITIATOR_TID_LIMIT;
 
      case RcvAck:
         return event->u.RcvAck.tid < INITIATOR_TID_LIMIT;
 
+     case RcvNack:
+	 return event->u.RcvNack.tid < INITIATOR_TID_LIMIT;
+
      case RcvAbort:
         return event->u.RcvAbort.tid < INITIATOR_TID_LIMIT;
 
@@ -129,6 +140,8 @@
  */
 static int truncated_datagram(WAPEvent *dgram)
 {
+    debug("wap.wtp", 0, "XXXXXXX: truncated_datagram");
+
     gw_assert(dgram->type == T_DUnitdata_Ind);
 
     if (octstr_len(dgram->u.T_DUnitdata_Ind.user_data) < 3) {
@@ -143,6 +156,8 @@
 {
     WAPEvent *event;
 
+    debug("wap.wtp", 0, "XXXXXXX: unpack_invoke");
+
     event = wap_event_create(RcvInvoke);
     event->u.RcvInvoke.user_data = 
         octstr_duplicate(pdu->u.Invoke.user_data);
@@ -164,6 +179,8 @@
 {
     WAPEvent *event;
 
+    debug("wap.wtp", 0, "XXXXXXX: unpack_result");
+
     event = wap_event_create(RcvResult);
     event->u.RcvResult.user_data = 
         octstr_duplicate(pdu->u.Result.user_data);
@@ -179,20 +196,53 @@
 static WAPEvent *unpack_ack(WTP_PDU *pdu, WAPAddrTuple *addr_tuple)
 {
     WAPEvent *event;
+    WTP_TPI *tpi;
+    int num_tpis, i;
+
+    debug("wap.wtp", 0, "XXXXXXX: unpack_ack");
 
     event = wap_event_create(RcvAck);
     event->u.RcvAck.tid = pdu->u.Ack.tid;
     event->u.RcvAck.tid_ok = pdu->u.Ack.tidverify;
     event->u.RcvAck.rid = pdu->u.Ack.rid;
+    
+    if (pdu->u.Ack.con == 1) {
+	num_tpis = list_len(pdu->options);
+	for (i = 0; i < num_tpis; i++) {
+	    tpi = list_get(pdu->options, i);
+	
+	    if (tpi->type == 0x03)
+		event->u.RcvAck.psnr = octstr_get_char(tpi->data, 0);
+	}
+    }
+
     event->u.RcvAck.addr_tuple = wap_addr_tuple_duplicate(addr_tuple);
 
     return event;
 }
 
+static WAPEvent *unpack_nack(WTP_PDU *pdu, WAPAddrTuple *addr_tuple)
+{
+    WAPEvent *event;
+
+    debug("wap.wtp", 0, "XXXXXXX: unpack_nack");
+
+    event = wap_event_create(RcvNack);
+    event->u.RcvNack.tid = pdu->u.Negative_ack.tid;
+    event->u.RcvNack.rid = pdu->u.Negative_ack.rid;
+    event->u.RcvNack.nmissing = pdu->u.Negative_ack.nmissing;
+    event->u.RcvNack.missing =  octstr_duplicate(pdu->u.Negative_ack.missing);
+    event->u.RcvNack.addr_tuple = wap_addr_tuple_duplicate(addr_tuple);
+
+    return event;
+}
+
 static WAPEvent *unpack_abort(WTP_PDU *pdu, WAPAddrTuple *addr_tuple)
 {
      WAPEvent *event;
 
+     debug("wap.wtp", 0, "XXXXXXX: unpack_abort");
+
      event = wap_event_create(RcvAbort);
      event->u.RcvAbort.tid = pdu->u.Abort.tid;
      event->u.RcvAbort.abort_type = pdu->u.Abort.abort_type;
@@ -202,10 +252,31 @@
      return event;
 }
 
+static WAPEvent *unpack_segmented_invoke(WTP_PDU *pdu, WAPAddrTuple *addr_tuple)
+{
+    WAPEvent *event;
+
+    debug("wap.wtp", 0, "XXXXXXX: unpack_segmented_invoke");
+
+    event = wap_event_create(RcvSegmInvoke);
+
+    event->u.RcvSegmInvoke.tid = pdu->u.Segmented_invoke.tid;
+    event->u.RcvSegmInvoke.gtr = pdu->u.Segmented_invoke.gtr;
+    event->u.RcvSegmInvoke.ttr = pdu->u.Segmented_invoke.ttr;
+    event->u.RcvSegmInvoke.rid = pdu->u.Segmented_invoke.rid;
+    event->u.RcvSegmInvoke.psn = pdu->u.Segmented_invoke.psn;
+    event->u.RcvSegmInvoke.user_data = octstr_duplicate(pdu->u.Segmented_invoke.user_data);
+    event->u.RcvSegmInvoke.addr_tuple = wap_addr_tuple_duplicate(addr_tuple);
+
+    return event;
+}
+
 static WAPEvent *pack_error(WAPEvent *datagram)
 {
     WAPEvent *event;
 
+    debug("wap.wtp", 0, "XXXXXXX: pack_error");
+
     gw_assert(datagram->type == T_DUnitdata_Ind);
 
     event = wap_event_create(RcvErrorPDU);
@@ -234,6 +305,8 @@
     WAPEvent *event;
     Octstr *data;
 
+    debug("wap.wtp", 0, "XXXXXXX: unpack_wdp_datagram_real");
+
     gw_assert(datagram->type == T_DUnitdata_Ind);
 
     data = datagram->u.T_DUnitdata_Ind.user_data;
@@ -280,9 +353,17 @@
 	    event = unpack_ack(pdu, datagram->u.T_DUnitdata_Ind.addr_tuple);    
         break;
 
+        case Negative_ack:
+	    event = unpack_nack(pdu, datagram->u.T_DUnitdata_Ind.addr_tuple);    
+        break;
+
 	case Abort:
 	    event = unpack_abort(pdu, datagram->u.T_DUnitdata_Ind.addr_tuple);
         break;         
+
+	case Segmented_invoke:
+	    event = unpack_segmented_invoke(pdu, datagram->u.T_DUnitdata_Ind.addr_tuple);
+	    break;
 
 	default:
 	    event = pack_error(datagram);
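
Note on the wtp.c hunks above: wtp_event_is_for_responder routes the new RcvNack and RcvSegmInvoke events exactly like the existing ones, by splitting the TID space between initiator and responder machines. A self-contained sketch of that split (the limit value is an assumption for illustration; the real constant is Kannel's INITIATOR_TID_LIMIT):

    #include <stdio.h>

    /* Assumed value, for illustration only. */
    #define INITIATOR_TID_LIMIT 0x8000

    /* Transactions started by the peer fall below the limit and belong
     * to a responder machine; our own transactions live above it. */
    static int tid_is_for_responder(long tid)
    {
        return tid < INITIATOR_TID_LIMIT;
    }

    int main(void)
    {
        printf("tid 0x0102 -> %s\n",
               tid_is_for_responder(0x0102) ? "responder" : "initiator");
        printf("tid 0x8102 -> %s\n",
               tid_is_for_responder(0x8102) ? "responder" : "initiator");
        return 0;
    }
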
diff -ub wap/wtp_init.c ../gateway-cvs/wap/wtp_init.c
--- wap/wtp_init.c	Tue Mar 27 06:35:03 2001
+++ ../gateway-cvs/wap/wtp_init.c	Sun Oct 27 23:12:05 2002
@@ -141,6 +141,8 @@
 
 void wtp_initiator_dispatch_event(WAPEvent *event) 
 {
+    debug("wap.wtp", 0, "XXXXXXX: wtp_initiator_dispatch_event");
+
     list_produce(queue, event);
 }
 
diff -ub wap/wtp_pack.c ../gateway-cvs/wap/wtp_pack.c
--- wap/wtp_pack.c	Tue Mar 27 06:35:17 2001
+++ ../gateway-cvs/wap/wtp_pack.c	Sun Oct 27 23:12:05 2002
@@ -106,6 +106,45 @@
     return dgram;
 }
 
+WAPEvent *wtp_segm_pack_result(WTPRespMachine *machine, int psn, int gtr, int ttr)
+{
+    WAPEvent *dgram = NULL;
+    WTP_PDU *pdu = NULL;
+     
+    debug("wap.wtp", 0, "XXXXXXX: wtp_segm_pack_result psn=%d gtr=%d ttr=%d list_len=%ld segm_count=%ld", psn, gtr, ttr, list_len(machine->segments), machine->segm_count);
+
+    gw_assert(psn < machine->segm_count);
+
+    if (psn == 0) {
+        pdu = wtp_pdu_create(Result);
+        pdu->u.Result.con = 0;
+	pdu->u.Result.gtr = gtr;
+    	pdu->u.Result.ttr = ttr;
+        pdu->u.Result.rid = machine->rid;
+        pdu->u.Result.tid = send_tid(machine->tid);
+        pdu->u.Result.user_data = octstr_duplicate(list_get(machine->segments, 0));
+    } else {
+        pdu = wtp_pdu_create(Segmented_result);
+        
+        pdu->u.Segmented_result.con = 0;
+    
+	pdu->u.Segmented_result.gtr = gtr;
+	pdu->u.Segmented_result.ttr = ttr;
+        pdu->u.Segmented_result.rid = machine->rid;
+        pdu->u.Segmented_result.tid = send_tid(machine->tid);
+        pdu->u.Segmented_result.psn = psn;
+
+        pdu->u.Segmented_result.user_data = octstr_duplicate(list_get(machine->segments, psn));
+    }
+    
+    dgram = wap_event_create(T_DUnitdata_Req);
+    dgram->u.T_DUnitdata_Req.addr_tuple = wap_addr_tuple_duplicate(machine->addr_tuple);
+    dgram->u.T_DUnitdata_Req.user_data = wtp_pdu_pack(pdu);
+    wtp_pdu_destroy(pdu);
+    
+    return dgram;
+}
+
 void wtp_pack_set_rid(WAPEvent *dgram, long rid)
 {
     gw_assert(dgram != NULL);
@@ -149,6 +188,70 @@
     dgram = wap_event_create(T_DUnitdata_Req);
     dgram->u.T_DUnitdata_Req.addr_tuple = wap_addr_tuple_duplicate(address);
     dgram->u.T_DUnitdata_Req.user_data = wtp_pdu_pack(pdu);
+    wtp_pdu_dump(pdu, 0);
+    wtp_pdu_destroy(pdu);
+
+    return dgram;
+}
+
+WAPEvent *wtp_pack_segmack(long ack_type, int rid_flag, long tid, int psnr, 
+                       WAPAddrTuple *address)
+{
+    WAPEvent *dgram = NULL;
+    WTP_PDU *pdu;
+    Octstr *o_psnr = octstr_create("0");
+     
+    debug("wap.wtp", 0, "XXXXXXX: wtp_pack_segmack PSNR=%d", psnr);
+
+    pdu = wtp_pdu_create(Ack);
+    pdu->u.Ack.con = 1;
+    pdu->u.Ack.tidverify = ack_type;
+    pdu->u.Ack.rid = rid_flag;
+    pdu->u.Ack.tid = send_tid(tid);
+    octstr_set_char(o_psnr, 0, psnr);
+    
+    wtp_pdu_append_tpi(pdu, 0x03, o_psnr);
+
+    dgram = wap_event_create(T_DUnitdata_Req);
+    dgram->u.T_DUnitdata_Req.addr_tuple = wap_addr_tuple_duplicate(address);
+    
+    dgram->u.T_DUnitdata_Req.user_data = wtp_pdu_pack(pdu);
+    wtp_pdu_dump(pdu, 0);
+    wtp_pdu_destroy(pdu);
+
+    return dgram;
+}
+
+
+WAPEvent *wtp_pack_segmnack(int rid_flag, long tid, List *nack_list, 
+                       WAPAddrTuple *address)
+{
+    WAPEvent *dgram = NULL;
+    WTP_PDU *pdu;
+    Octstr *o_psnr = octstr_create("");
+    int i;
+     
+    debug("wap.wtp", 0, "XXXXXXX: wtp_pack_segmnack");
+
+    pdu = wtp_pdu_create(Negative_ack);
+    pdu->u.Negative_ack.con = 0;
+    pdu->u.Negative_ack.rid = rid_flag;
+    pdu->u.Negative_ack.tid = send_tid(tid);
+    pdu->u.Negative_ack.nmissing = list_len(nack_list);
+
+    for (i=0; i<pdu->u.Negative_ack.nmissing; i++)
+    	octstr_append(o_psnr, list_get(nack_list, i));
+
+    if (pdu->u.Negative_ack.nmissing > 0)
+    	pdu->u.Negative_ack.missing = o_psnr;
+    else
+    	octstr_destroy(o_psnr);
+    	
+    dgram = wap_event_create(T_DUnitdata_Req);
+    dgram->u.T_DUnitdata_Req.addr_tuple = wap_addr_tuple_duplicate(address);
+    
+    dgram->u.T_DUnitdata_Req.user_data = wtp_pdu_pack(pdu);
+    wtp_pdu_dump(pdu, 0);
     wtp_pdu_destroy(pdu);
 
     return dgram;
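
Note on the wtp_pack.c hunks above: wtp_segm_pack_result sends PSN 0 as an ordinary Result PDU and every later piece as a Segmented_result PDU. A standalone sketch of the arithmetic behind that choice (only the 900-byte segment size is taken from the patch; the rest is illustrative):

    #include <stdio.h>

    #define SEGM_SIZE 900

    /* Number of segments for a payload, by ceiling division. */
    static int segment_count(long data_len)
    {
        return (int) ((data_len + SEGM_SIZE - 1) / SEGM_SIZE);
    }

    int main(void)
    {
        long len = 2000;
        int n = segment_count(len), psn;

        for (psn = 0; psn < n; psn++) {
            long off = (long) psn * SEGM_SIZE;
            long piece = (len - off < SEGM_SIZE) ? len - off : SEGM_SIZE;
            printf("psn %d: %s PDU, bytes [%ld, %ld)\n",
                   psn, psn == 0 ? "Result" : "Segmented_result",
                   off, off + piece);
        }
        return 0;
    }
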
diff -ub wap/wtp_pack.h ../gateway-cvs/wap/wtp_pack.h
--- wap/wtp_pack.h	Mon Nov 20 11:55:54 2000
+++ ../gateway-cvs/wap/wtp_pack.h	Sun Oct 27 23:12:05 2002
@@ -33,6 +33,9 @@
  */
 
 WAPEvent *wtp_pack_result(WTPRespMachine *resp_machine, WAPEvent *event); 
+WAPEvent *wtp_segm_pack_result(WTPRespMachine *machine, int psn, int gtr, int ttr);
+WAPEvent *wtp_pack_segmack(long ack_type, int rid_flag, long tid, int psnr, WAPAddrTuple *address);
+WAPEvent *wtp_pack_segmnack(int rid_flag, long tid, List *nack_list, WAPAddrTuple *address);
 
 /*
  * Create a datagram event, having abort PDU as user data. Fetches SDU
diff -ub wap/wtp_resp.c ../gateway-cvs/wap/wtp_resp.c
--- wap/wtp_resp.c	Fri Apr 19 09:01:07 2002
+++ ../gateway-cvs/wap/wtp_resp.c	Tue Nov  5 03:19:48 2002
@@ -133,6 +133,13 @@
  */
 static void send_ack(WTPRespMachine *machine, long ack_type, int rid_flag);
 
+static void send_segm_ack(WTPRespMachine *machine, long ack_type, int rid_flag, int psnr);
+static Octstr *cat_segments(WTPRespMachine *machine);
+static void create_segments(WTPRespMachine *machine, Octstr *user_data);
+static void wtp_check_segments(WTPRespMachine *machine, int grp_psn, int ttr);
+static void wtp_send_segm(WTPRespMachine *machine);
+static void wtp_resend_segm(WTPRespMachine *machine, WAPEvent *event);
+static int wtp_check_group(WTPRespMachine *machine, int grp_psn);
 
 /******************************************************************************
  *
@@ -144,6 +151,8 @@
                    wap_dispatch_func_t *session_dispatch,
                    wap_dispatch_func_t *push_dispatch) 
 {
+    debug("wap.wtp", 0, "XXXXXXX: wtp_resp_init");
+
     resp_machines = list_create();
     resp_machine_id_counter = counter_create();
 
@@ -164,6 +173,8 @@
 
 void wtp_resp_shutdown(void) 
 {
+    debug("wap.wtp", 0, "XXXXXXX: wtp_resp_shutdown");
+
     gw_assert(resp_run_status == running);
     resp_run_status = terminating;
     list_remove_producer(resp_queue);
@@ -182,6 +193,7 @@
 
 void wtp_resp_dispatch_event(WAPEvent *event) 
 {
+    debug("wap.wtp", 0, "XXXXXXX: wtp_resp_dispatch_event");
     list_produce(resp_queue, event);
 }
 
@@ -197,6 +209,8 @@
     WTPRespMachine *sm;
     WAPEvent *e;
 
+    debug("wap.wtp", 0, "XXXXXXX: main_thread");
+
     while (resp_run_status == running && 
            (e = list_consume(resp_queue)) != NULL) {
 	sm = resp_machine_find_or_create(e);
@@ -212,6 +226,8 @@
  */
 static unsigned char *name_resp_state(int s)
 {
+    debug("wap.wtp", 0, "XXXXXXX: name_resp_state");
+
        switch (s) {
        #define STATE_NAME(state) case state: return #state;
        #define ROW(state, event, condition, action, new_state)
@@ -266,6 +282,8 @@
 {       
     WAPEvent *ab;
 
+    debug("wap.wtp", 0, "XXXXXXX: handle_wrong_version");
+
     if (event->type == RcvInvoke) {
         ab = wtp_pack_abort(PROVIDER, WTPVERSIONZERO, event->u.RcvInvoke.tid, 
                             event->u.RcvInvoke.addr_tuple);
@@ -280,6 +298,8 @@
 {
     WAPEvent *ab;
 
+    debug("wap.wtp", 0, "XXXXXXX: handle_no_sar");
+
     if (event->type == RcvInvoke) {
         ab = wtp_pack_abort(PROVIDER, NOTIMPLEMENTEDSAR, 
                             event->u.RcvInvoke.tid,
@@ -297,6 +317,8 @@
      * If clients request WTP-SAR should we force to continue
      * or act as be should do by telling the client to call back.
      */
+    debug("wap.wtp", 0, "XXXXXXX: erroneous_field_in");
+
     if (wtp_forced_sar) 
         return 0;
 
@@ -310,6 +332,8 @@
  */
 static void handle_erroneous_field_in(WAPEvent *event)
 {
+    debug("wap.wtp", 0, "XXXXXXX: handle_erroneous_field_in");
+
     if (event->type == RcvInvoke){
         if (event->u.RcvInvoke.version != 0){
 	   debug("wap.wtp_resp", 0, "WTP_RESP: wrong version, aborting"
@@ -343,6 +367,8 @@
     long tid, mid;
     WAPAddrTuple *tuple;
 
+    debug("wap.wtp", 0, "XXXXXXX: resp_machine_find_or_create");
+
     tid = -1;
     tuple = NULL;
     mid = -1;
@@ -359,11 +385,27 @@
             }
             break;
 
+        case RcvSegmInvoke:
+            /* check if erroneous fields are given */
+            if (erroneous_field_in(event)) {
+                handle_erroneous_field_in(event);
+                return NULL;
+            } else {
+                tid = event->u.RcvSegmInvoke.tid;
+                tuple = event->u.RcvSegmInvoke.addr_tuple;
+            }
+            break;
+
        case RcvAck:
             tid = event->u.RcvAck.tid;
             tuple = event->u.RcvAck.addr_tuple;
             break;
 
+       case RcvNack:
+            tid = event->u.RcvNack.tid;
+            tuple = event->u.RcvNack.addr_tuple;
+            break;
+
         case RcvAbort:
             tid = event->u.RcvAbort.tid;
             tuple = event->u.RcvAbort.addr_tuple;
@@ -471,6 +513,8 @@
     machine_pattern *pat;
     WTPRespMachine *m;
 	
+    debug("wap.wtp", 0, "XXXXXXX: is_wanted_resp_machine");
+
     m = a;
     pat = b;
 
@@ -491,6 +535,8 @@
     machine_pattern pat;
     WTPRespMachine *m;
 	
+    debug("wap.wtp", 0, "XXXXXXX: resp_machine_find");
+
     pat.tuple = tuple;
     pat.tid = tid;
     pat.mid = mid;
@@ -505,6 +551,8 @@
 {
     WTPRespMachine *resp_machine;
 	
+    debug("wap.wtp", 0, "XXXXXXX: resp_machine_create");
+
     resp_machine = gw_malloc(sizeof(WTPRespMachine)); 
         
     #define ENUM(name) resp_machine->name = LISTEN;
@@ -512,6 +560,8 @@
     #define INTEGER(name) resp_machine->name = 0; 
     #define TIMER(name) resp_machine->name = gwtimer_create(resp_queue); 
     #define ADDRTUPLE(name) resp_machine->name = NULL; 
+    #define LIST(name) resp_machine->name = list_create();
+    #define DICT(name) resp_machine->name = dict_create(10, octstr_destroy_item);
     #define MACHINE(field) field
     #include "wtp_resp_machine.def"
 
@@ -537,6 +587,8 @@
 {
     WTPRespMachine *resp_machine;
 
+    debug("wap.wtp", 0, "XXXXXXX: resp_machine_destroy");
+
     resp_machine = p;
     debug("wap.wtp", 0, "WTP: Destroying WTPRespMachine %p (%ld)", 
 	  (void *) resp_machine, resp_machine->mid);
@@ -548,6 +600,8 @@
     #define INTEGER(name) resp_machine->name = 0; 
     #define TIMER(name) gwtimer_destroy(resp_machine->name); 
     #define ADDRTUPLE(name) wap_addr_tuple_destroy(resp_machine->name); 
+    #define LIST(name) list_destroy(resp_machine->name, octstr_destroy_item);
+    #define DICT(name) dict_destroy(resp_machine->name);
     #define MACHINE(field) field
     #include "wtp_resp_machine.def"
     gw_free(resp_machine);
@@ -560,6 +614,8 @@
 {
     WAPEvent *event;
 	
+    debug("wap.wtp", 0, "XXXXXXX: create_tr_invoke_ind");
+
     event = wap_event_create(TR_Invoke_Ind);
     event->u.TR_Invoke_Ind.ack_type = sm->u_ack;
     event->u.TR_Invoke_Ind.user_data = octstr_duplicate(user_data);
@@ -578,6 +634,8 @@
 {
     WAPEvent *event;
 	
+    debug("wap.wtp", 0, "XXXXXXX: create_tr_result_cnf");
+
     event = wap_event_create(TR_Result_Cnf);
     event->u.TR_Result_Cnf.addr_tuple = 
 	wap_addr_tuple_duplicate(sm->addr_tuple);
@@ -592,6 +650,8 @@
 static WAPEvent *create_tr_abort_ind(WTPRespMachine *sm, long abort_reason) {
     WAPEvent *event;
 	
+    debug("wap.wtp", 0, "XXXXXXX: create_tr_abort_ind");
+
     event = wap_event_create(TR_Abort_Ind);
     event->u.TR_Abort_Ind.abort_code = abort_reason;
     event->u.TR_Abort_Ind.addr_tuple = 
@@ -609,6 +669,8 @@
 {
     WAPEvent *timer_event;
 
+    debug("wap.wtp", 0, "XXXXXXX: start_timer_A");
+
     timer_event = wap_event_create(TimerTO_A);
     timer_event->u.TimerTO_A.handle = machine->mid;
     gwtimer_start(machine->timer, L_A_WITH_USER_ACK, timer_event);
@@ -621,6 +683,8 @@
 {
     WAPEvent *timer_event;
 
+    debug("wap.wtp", 0, "XXXXXXX: start_timer_R");
+
     timer_event = wap_event_create(TimerTO_R);
     timer_event->u.TimerTO_R.handle = machine->mid;
     gwtimer_start(machine->timer, L_R_WITH_USER_ACK, timer_event);
@@ -633,6 +697,8 @@
 {
     WAPEvent *timer_event;
 
+    debug("wap.wtp", 0, "XXXXXXX: start_timer_W");
+
     timer_event = wap_event_create(TimerTO_W);
     timer_event->u.TimerTO_W.handle = machine->mid;
     gwtimer_start(machine->timer, W_WITH_USER_ACK, timer_event);
@@ -642,6 +708,8 @@
 {
     WAPEvent *e;
 
+    debug("wap.wtp", 0, "XXXXXXX: send_abort");
+
     e = wtp_pack_abort(type, reason, machine->tid, machine->addr_tuple);
     dispatch_to_wdp(e);
 }
@@ -650,6 +718,188 @@
 {
     WAPEvent *e;
 
+    debug("wap.wtp", 0, "XXXXXXX: send_ack");
+
     e = wtp_pack_ack(ack_type, rid_flag, machine->tid, machine->addr_tuple);
     dispatch_to_wdp(e);
+}
+
+static void send_segm_ack(WTPRespMachine *machine, long ack_type, int rid_flag, int psnr)
+{
+    WAPEvent *event;
+
+    debug("wap.wtp", 0, "XXXXXXX: send_segm_ack PSNR=%d", psnr);
+
+    event = wtp_pack_segmack(ack_type, rid_flag, machine->tid, psnr, machine->addr_tuple);
+    dispatch_to_wdp(event);
+}
+
+
+static void wtp_check_segments(WTPRespMachine *machine, int grp_psn, int ttr)
+{
+    WAPEvent *event;
+    int i=0, nack_len;
+    List *nack_list;
+    Octstr *key;
+    Octstr *o_psnr = octstr_create("0");
+
+    debug("wap.wtp", 0, "XXXXXXX: wtp_check_segments");
+    
+    nack_list = list_create();
+    for (i=machine->last_ack_psn+1; i<grp_psn; i++) {
+    	key = octstr_format("%d", i);
+    	if (dict_get(machine->curr_grp_segm, key) == NULL) {
+    		octstr_set_char(o_psnr, 0, i);
+    		list_append(nack_list, octstr_duplicate(o_psnr));
+	}
+	octstr_destroy(key);
+    }
+    
+    nack_len = list_len(nack_list);
+    
+    if (nack_len == 0) {
+	for (i=machine->last_ack_psn+1; i<=grp_psn; i++) {
+	    key = octstr_format("%d", i);
+    	    debug("wap.wtp", 0, "************: add_segments %d", i);
+	    list_insert(machine->segments, i, octstr_duplicate(dict_get(machine->curr_grp_segm, key)));
+	    octstr_destroy(key);
+	    machine->segm_count++;
+	}
+	
+	if (ttr == 0) {
+	    event = wtp_pack_segmack(ACKNOWLEDGEMENT, machine->rid, machine->tid, grp_psn, machine->addr_tuple);
+	    wap_event_destroy(machine->result);
+	    machine->result = wap_event_duplicate(event);
+	    dispatch_to_wdp(event);
+	    machine->last_ack_psn = grp_psn;
+	    machine->ack_pdu_sent = 1;
+	}
+	dict_destroy(machine->curr_grp_segm);
+	machine->curr_grp_segm = dict_create(10, octstr_destroy_item);
+    } else {
+	event = wtp_pack_segmnack(machine->rid, machine->tid, nack_list, machine->addr_tuple);
+	wap_event_destroy(machine->result);
+	machine->result = wap_event_duplicate(event);
+	dispatch_to_wdp(event);
+    }
+    
+    octstr_destroy(o_psnr);
+    list_destroy(nack_list, octstr_destroy_item);
+}
+
+
+static int wtp_check_group(WTPRespMachine *machine, int grp_psn)
+{
+    int i=0;
+    int result=1;
+    Octstr *key;
+    
+    debug("wap.wtp", 0, "XXXXXXX: wtp_check_group");
+
+    for (i=machine->last_ack_psn+1; i<grp_psn; i++) {
+	key = octstr_format("%d", i);
+    	if (dict_get(machine->curr_grp_segm, key) == NULL) {
+    		result = 0;
+		octstr_destroy(key);
+    		break;
+	}
+	octstr_destroy(key);
+    }
+    
+    return result;
+}
+
+static Octstr *cat_segments(WTPRespMachine *machine) {
+    Octstr *res = octstr_create("");
+    int i;
+
+    debug("wap.wtp", 0, "XXXXXXX: cat_segments");
+    
+    for (i=0; i<machine->segm_count; i++) {
+    	octstr_append(res, list_get(machine->segments, i));
+    }
+    
+    return res;
+}
+
+static void create_segments(WTPRespMachine *machine, Octstr *user_data) {
+    int i, dataLen, segm_size = 900;
+
+    debug("wap.wtp", 0, "XXXXXXX: create_segments");
+
+    list_destroy(machine->segments, octstr_destroy_item);
+    machine->last_ack_psn = -1;
+    
+    machine->segments = list_create();
+    
+    dataLen = octstr_len(user_data);
+    machine->segm_count = dataLen / segm_size;
+    
+    if (dataLen > (machine->segm_count * segm_size))
+    	machine->segm_count = machine->segm_count + 1;
+    
+    for (i=0; i<machine->segm_count; i++) {
+    	list_insert(machine->segments, i, octstr_copy_real(user_data, i * segm_size, segm_size));
+    }
+}
+
+static void wtp_send_segm(WTPRespMachine *machine) {
+    WAPEvent *result;
+    int i, max_count, grp_size=3, gtr=0, ttr=0;
+    
+    debug("wap.wtp", 0, "XXXXXXX: wtp_send_segm");
+
+    i = machine->last_ack_psn + 1;
+    if ((i + grp_size + 1) >= machine->segm_count) {
+    	machine->ttr = 1;
+    	machine->gtr = 0;
+    	max_count = machine->segm_count - 1;
+    } else {
+    	machine->ttr = 0;
+    	machine->gtr = 1;
+    	max_count = i + grp_size;
+    }
+    	
+    for (; i<max_count; i++) {
+    	result = wtp_segm_pack_result(machine, i, 0, 0);
+    	wap_event_dump(result);
+     	dispatch_to_wdp(result);
+    }
+    
+    result = wtp_segm_pack_result(machine, i, machine->gtr, machine->ttr);
+    machine->result = wap_event_duplicate(result);
+
+    dispatch_to_wdp(result);
+
+    machine->last_psn = i;
+}
+
+static void wtp_resend_segm(WTPRespMachine *machine, WAPEvent *event) {
+    WAPEvent *result;
+    int i, nmissing, gtr=0, ttr=0, psnr=0;
+    
+    debug("wap.wtp", 0, "XXXXXXX: wtp_resend_segm");
+
+    nmissing = event->u.RcvNack.nmissing;
+    
+    for (i = 0; i < nmissing; i++) {
+    	psnr = octstr_get_char(event->u.RcvNack.missing, i);
+
+    	if (psnr == machine->last_psn) {
+    	    if (machine->ttr == 0) {
+    	    	gtr = 1;
+    	    	ttr = 0;
+    	    } else {
+    	    	gtr = 0;
+    	    	ttr = 1;
+    	    }
+	}
+	
+    	result = wtp_segm_pack_result(machine, psnr, gtr, ttr);
+    	
+    	if (i == (nmissing-1))
+    	    machine->result = wap_event_duplicate(result);
+
+     	dispatch_to_wdp(result);
+    }
 }
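
Note on the wtp_resp.c hunks above: wtp_check_segments walks the PSNs between the last acknowledged one and the group trailer and nacks every hole; only a complete group is acknowledged and copied into machine->segments. A standalone sketch of the hole detection, with a plain bitmap standing in for the Dict keyed by PSN strings (everything here is illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MAX_PSN 256

    int main(void)
    {
        unsigned char seen[MAX_PSN];
        int last_ack_psn = -1, grp_psn = 5, i, nmissing = 0;

        memset(seen, 0, sizeof(seen));
        /* PSNs 0, 1, 3 and the trailer 5 arrived; 2 and 4 were lost. */
        seen[0] = seen[1] = seen[3] = seen[5] = 1;

        for (i = last_ack_psn + 1; i < grp_psn; i++) {
            if (!seen[i]) {
                printf("nack psn %d\n", i);
                nmissing++;
            }
        }
        if (nmissing == 0)
            printf("group complete: ack with psnr=%d\n", grp_psn);
        else
            printf("send Negative_ack, nmissing=%d\n", nmissing);
        return 0;
    }
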
diff -ub wap/wtp_resp.h ../gateway-cvs/wap/wtp_resp.h
--- wap/wtp_resp.h	Tue May 14 04:05:08 2002
+++ ../gateway-cvs/wap/wtp_resp.h	Sun Oct 27 23:12:06 2002
@@ -38,6 +38,8 @@
        #define ADDRTUPLE(name) WAPAddrTuple *name;
        #define ENUM(name) resp_states name;
        #define EVENT(name) WAPEvent *name;
+       #define LIST(name) List *name;
+       #define DICT(name) Dict *name;
        #define MACHINE(field) field
        #include "wtp_resp_machine.def"
 };
diff -ub wap/wtp_resp_machine.def ../gateway-cvs/wap/wtp_resp_machine.def
--- wap/wtp_resp_machine.def	Mon Nov 20 11:55:54 2000
+++ ../gateway-cvs/wap/wtp_resp_machine.def	Sun Oct 27 23:12:06 2002
@@ -44,6 +44,10 @@
     #error "Macro EVENT is missing."
 #elif !defined(ADDRTUPLE)
     #error "Macro ADDRTUPLE is missing."
+#elif !defined(LIST)
+    #error "Macro LIST is missing."
+#elif !defined(DICT)
+    #error "Macro DICT is missing."
 #endif
 
 MACHINE(ENUM(state)
@@ -57,6 +61,13 @@
                                       acknowledgement required) */ 
         INTEGER(rid)              /* retransmission flag, telling are we 
                                       resending the result */ 
+        INTEGER(gtr)
+        INTEGER(ttr)
+        INTEGER(last_ack_psn)
+        INTEGER(last_psn)
+        INTEGER(segm_count)
+        LIST(segments)
+        DICT(curr_grp_segm)
         EVENT(result)               /* packed result message - for resending */
         INTEGER(ack_pdu_sent)     /* are we resending the acknowledgement */
         TIMER(timer)              /* pointer to the timer of this machine timer
@@ -71,3 +82,5 @@
 #undef TIMER
 #undef EVENT
 #undef ADDRTUPLE
+#undef LIST
+#undef DICT
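
Note on the wtp_resp_machine.def hunk above: the LIST and DICT macros must be supplied at every include site, because the same field list expands into the struct declaration, resp_machine_create and resp_machine_destroy; a macro missing from one expansion leaves a field undeclared, uninitialized or leaked (hence the new #error guards). A toy version of that multiple expansion (types and helpers below are stand-ins, not Kannel's gwlib):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int dummy; } List;   /* stand-in for gwlib's List */
    static List *list_create(void) { return calloc(1, sizeof(List)); }
    static void list_destroy(List *list) { free(list); }

    #define MACHINE_FIELDS \
        INTEGER(segm_count) \
        LIST(segments)

    /* Expansion 1: the struct definition. */
    struct machine {
        #define INTEGER(name) long name;
        #define LIST(name) List *name;
        MACHINE_FIELDS
        #undef INTEGER
        #undef LIST
    };

    int main(void)
    {
        struct machine m;

        /* Expansion 2: construction. */
        #define INTEGER(name) m.name = 0;
        #define LIST(name) m.name = list_create();
        MACHINE_FIELDS
        #undef INTEGER
        #undef LIST

        printf("segm_count=%ld segments=%p\n",
               m.segm_count, (void *) m.segments);

        /* Expansion 3: destruction mirrors construction. */
        #define INTEGER(name) m.name = 0;
        #define LIST(name) list_destroy(m.name);
        MACHINE_FIELDS
        #undef INTEGER
        #undef LIST
        return 0;
    }
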
diff -ub wap/wtp_resp_states.def ../gateway-cvs/wap/wtp_resp_states.def
--- wap/wtp_resp_states.def	Tue Mar 27 06:35:39 2001
+++ ../gateway-cvs/wap/wtp_resp_states.def	Mon Oct 28 01:20:10 2002
@@ -97,17 +97,20 @@
 STATE_NAME(RESULT_WAIT)
 STATE_NAME(RESULT_RESP_WAIT)
 STATE_NAME(WAIT_TIMEOUT_STATE)
+STATE_NAME(SEGMENT_WAIT)
+STATE_NAME(SEGMENT_ACK_WAIT)
 
 ROW(LISTEN,
     RcvInvoke,
     (event->u.RcvInvoke.tcl == 2 || event->u.RcvInvoke.tcl == 1) &&
+     event->u.RcvInvoke.ttr == 1 &&
      wtp_tid_is_valid(event, resp_machine) == ok,
     {
      resp_machine->u_ack = event->u.RcvInvoke.up_flag;
      resp_machine->tcl = event->u.RcvInvoke.tcl;
+     resp_machine->ttr = 1;
 
-     wsp_event = create_tr_invoke_ind(resp_machine, 
-         event->u.RcvInvoke.user_data);
+     wsp_event = create_tr_invoke_ind(resp_machine, event->u.RcvInvoke.user_data);
      if (resp_machine->tcl == 1)
          wsp_push_client_dispatch_event(wsp_event);
      else
@@ -119,6 +122,31 @@
     INVOKE_RESP_WAIT)
 
 /*
+ * Segmented Message Arrived
+ */
+ROW(LISTEN,
+    RcvInvoke,
+     event->u.RcvInvoke.tcl == 2 &&
+     event->u.RcvInvoke.ttr == 0 &&     
+     wtp_tid_is_valid(event, resp_machine) == ok,
+    {
+     resp_machine->u_ack = event->u.RcvInvoke.up_flag;
+     resp_machine->tcl = event->u.RcvInvoke.tcl;
+     resp_machine->gtr = event->u.RcvInvoke.gtr;
+     resp_machine->ttr = 0;
+     resp_machine->last_ack_psn = -1;
+     
+     dict_put(resp_machine->curr_grp_segm, octstr_create("0"), octstr_duplicate(event->u.RcvInvoke.user_data));
+
+     if (resp_machine->gtr == 1) {
+     	wtp_check_segments(resp_machine, 0, 0);
+     }
+
+     start_timer_A(resp_machine);
+    },
+    SEGMENT_WAIT)
+
+/*
  * We must here store event fields and wsp indication into the wtp responder 
  * state machine: if tid is valid, we will continue the transaction without a 
  * new event.
@@ -133,8 +161,20 @@
      
      resp_machine->u_ack = event->u.RcvInvoke.up_flag;
      resp_machine->tcl = event->u.RcvInvoke.tcl;
-     resp_machine->invoke_indication = create_tr_invoke_ind(resp_machine, 
-                                       event->u.RcvInvoke.user_data);
+     resp_machine->ttr = event->u.RcvInvoke.ttr;
+
+     if (resp_machine->ttr == 1) {
+     	resp_machine->invoke_indication = create_tr_invoke_ind(resp_machine, event->u.RcvInvoke.user_data);
+     } else {
+	dict_put(resp_machine->curr_grp_segm, octstr_create("0"), octstr_duplicate(event->u.RcvInvoke.user_data));
+	resp_machine->segm_count = 1;
+	
+	if (resp_machine->gtr == 1) {
+	    wtp_check_segments(resp_machine, 0, 0);
+	}
+     }
+     start_timer_A(resp_machine);
+
      debug("wap.wtp", 0, "WTP_STATE: generating invoke indication, tid being" 
            "invalid");
     },
@@ -147,9 +187,15 @@
     RcvInvoke,
     event->u.RcvInvoke.tcl == 0,
     {
+     resp_machine->tcl = event->u.RcvInvoke.tcl;
+     resp_machine->u_ack = event->u.RcvInvoke.up_flag;
+     resp_machine->gtr = event->u.RcvInvoke.gtr;
+     resp_machine->ttr = event->u.RcvInvoke.ttr;
+
      wsp_event = create_tr_invoke_ind(resp_machine, 
          event->u.RcvInvoke.user_data);
      wsp_session_dispatch_event(wsp_event);
+     
     },
     LISTEN)
 
@@ -165,6 +211,17 @@
     LISTEN)
 
 /*
+ * No user indication here: transaction is not yet started.
+ */
+ROW(LISTEN,
+    RcvSegmInvoke,
+    1,
+    { 
+     send_abort(resp_machine, PROVIDER, PROTOERR);
+    },
+    LISTEN)
+
+/*
  * We must cache the newly accepted tid item, otherwise every tid after a 
  * suspected one will be validated. We use wsp event stored by the responder
  * machine.
@@ -172,6 +229,7 @@
 ROW(TIDOK_WAIT,
     RcvAck,
     (resp_machine->tcl == 2 || resp_machine->tcl == 1) && 
+     resp_machine->ttr == 1 && 
      event->u.RcvAck.tid_ok == 1,
     { 
      wsp_event = wap_event_duplicate(resp_machine->invoke_indication);
@@ -187,6 +245,23 @@
     },
     INVOKE_RESP_WAIT)
 
+ROW(TIDOK_WAIT,
+    RcvAck,
+     resp_machine->tcl == 2 &&
+     resp_machine->ttr == 0 && 
+     event->u.RcvAck.tid_ok == 1,
+    {
+     wtp_tid_set_by_machine(resp_machine, event->u.RcvAck.tid);
+
+     if (resp_machine->gtr == 1) {
+	send_segm_ack(resp_machine, ACKNOWLEDGEMENT, resp_machine->rid, 0); 
+	resp_machine->last_ack_psn = 0;
+	resp_machine->ack_pdu_sent = 1;
+	start_timer_A(resp_machine);
+     }
+    },
+    SEGMENT_WAIT)
+
 /*
  * When we get a negative answer to tid verification, we just abort trans-
  * action. Because wtp machines are destroyed when their state return to 
@@ -213,7 +288,9 @@
     RcvInvoke,
     event->u.RcvInvoke.rid == 1,
     { 
+     if (resp_machine->gtr == 1) {
      send_ack(resp_machine, TID_VERIFICATION, resp_machine->rid);
+     }
     },
     TIDOK_WAIT)
 
@@ -368,7 +445,7 @@
 
 ROW(RESULT_WAIT,
     TR_Result_Req,
-    1,
+    octstr_len(event->u.TR_Result_Req.user_data) <= 900,
     {
      WAPEvent *result;
      resp_machine->rcr = 0;
@@ -385,6 +462,26 @@
     RESULT_RESP_WAIT)
 
 ROW(RESULT_WAIT,
+    TR_Result_Req,
+    octstr_len(event->u.TR_Result_Req.user_data) > 900,
+    {
+     WAPEvent *result;
+     resp_machine->rcr = 0;
+
+     wap_event_destroy(resp_machine->result);
+     resp_machine->rid = 0;
+     resp_machine->last_ack_psn = -1;
+     
+     create_segments(resp_machine, event->u.TR_Result_Req.user_data);
+     wtp_send_segm(resp_machine);
+
+     resp_machine->rid = 1;
+
+     start_timer_R(resp_machine);
+    },
+    SEGMENT_ACK_WAIT)
+
+ROW(RESULT_WAIT,
     RcvAbort,
     1,
     {
@@ -415,6 +512,26 @@
     RESULT_WAIT)
 
 ROW(RESULT_WAIT,
+    RcvSegmInvoke,
+    event->u.RcvSegmInvoke.rid == 0,
+    { },
+    RESULT_WAIT)
+
+ROW(RESULT_WAIT,
+    RcvSegmInvoke,
+    event->u.RcvSegmInvoke.rid == 1 && resp_machine->ack_pdu_sent == 0,
+    { },
+    RESULT_WAIT)
+
+ROW(RESULT_WAIT,
+    RcvSegmInvoke,
+    event->u.RcvSegmInvoke.rid == 1 && resp_machine->ack_pdu_sent == 1,
+    {
+     /*send_segm_ack(resp_machine, ACKNOWLEDGEMENT, resp_machine->rid, event->u.RcvSegmInvoke.psn);*/
+    },
+    RESULT_WAIT)
+
+ROW(RESULT_WAIT,
     TR_Abort_Req,
     1,
     { 
@@ -493,6 +610,14 @@
     RESULT_RESP_WAIT)
 
 ROW(RESULT_RESP_WAIT,
+    RcvSegmInvoke,
+    1,
+    { 
+     dispatch_to_wdp(wap_event_duplicate(resp_machine->result));
+    },
+    RESULT_RESP_WAIT)
+
+ROW(RESULT_RESP_WAIT,
     RcvAbort,
     1,
     {
@@ -560,6 +685,20 @@
     WAIT_TIMEOUT_STATE)
 
 ROW(WAIT_TIMEOUT_STATE,
+    RcvSegmInvoke,
+    event->u.RcvSegmInvoke.rid == 0,
+    { },
+    WAIT_TIMEOUT_STATE)
+
+ROW(WAIT_TIMEOUT_STATE,
+    RcvSegmInvoke,
+    event->u.RcvSegmInvoke.rid == 1,
+    {
+     /*send_segm_ack(resp_machine, ACKNOWLEDGEMENT, resp_machine->rid, event->u.RcvSegmInvoke.psn);*/
+    },
+    WAIT_TIMEOUT_STATE)
+
+ROW(WAIT_TIMEOUT_STATE,
     RcvErrorPDU,
     1,
     {
@@ -598,6 +737,256 @@
      send_abort(resp_machine, USER, event->u.TR_Abort_Req.abort_reason);
     },
     LISTEN)
+
+ROW(SEGMENT_WAIT,
+    RcvSegmInvoke,
+     event->u.RcvSegmInvoke.ttr == 0,
+    {
+     Octstr *key;
+
+     resp_machine->gtr = event->u.RcvSegmInvoke.gtr;
+
+     if (event->u.RcvSegmInvoke.gtr == 1)
+        resp_machine->last_psn = event->u.RcvSegmInvoke.psn;
+
+     resp_machine->rid = event->u.RcvSegmInvoke.rid;
+
+     key = octstr_format("%ld", event->u.RcvSegmInvoke.psn);
+     dict_put(resp_machine->curr_grp_segm, octstr_duplicate(key), octstr_duplicate(event->u.RcvSegmInvoke.user_data));
+
+     if ((event->u.RcvSegmInvoke.psn > resp_machine->last_ack_psn) && (event->u.RcvSegmInvoke.gtr == 1)) {
+        resp_machine->aec = 0;
+     	wtp_check_segments(resp_machine, event->u.RcvSegmInvoke.psn, 0);
+     }
+     octstr_destroy(key);
+
+     start_timer_A(resp_machine);
+    },
+    SEGMENT_WAIT)
+
+ROW(SEGMENT_WAIT,
+    RcvSegmInvoke,
+     event->u.RcvSegmInvoke.ttr == 1 && 
+     wtp_check_group(resp_machine, event->u.RcvSegmInvoke.psn) == 0,
+    {       
+     Octstr *key;
+
+     resp_machine->gtr = 0;
+     resp_machine->rid = event->u.RcvSegmInvoke.rid;
+     resp_machine->ttr = 1;
+     resp_machine->aec++;
+
+     key = octstr_format("%ld", event->u.RcvSegmInvoke.psn);
+     dict_put(resp_machine->curr_grp_segm, octstr_duplicate(key), octstr_duplicate(event->u.RcvSegmInvoke.user_data));
+     wtp_check_segments(resp_machine, event->u.RcvSegmInvoke.psn, 1);
+     octstr_destroy(key);
+
+     start_timer_A(resp_machine);
+    },      
+    SEGMENT_WAIT)
+
+ROW(SEGMENT_WAIT,
+    RcvSegmInvoke,
+     event->u.RcvSegmInvoke.ttr == 1 && 
+     wtp_check_group(resp_machine, event->u.RcvSegmInvoke.psn) == 1,
+    {       
+     Octstr *key;
+
+     resp_machine->gtr = 0;
+     resp_machine->rid = event->u.RcvSegmInvoke.rid;
+     resp_machine->ttr = 1;
+     resp_machine->aec = 0;
+
+     key = octstr_format("%ld", event->u.RcvSegmInvoke.psn);
+     dict_put(resp_machine->curr_grp_segm, octstr_duplicate(key), octstr_duplicate(event->u.RcvSegmInvoke.user_data));
+     wtp_check_segments(resp_machine, event->u.RcvSegmInvoke.psn, 1);
+
+     octstr_destroy(key);
+
+     wsp_event = create_tr_invoke_ind(resp_machine, cat_segments(resp_machine));
+     wsp_session_dispatch_event(wsp_event);
+
+     start_timer_A(resp_machine);
+    },      
+    INVOKE_RESP_WAIT)
+
+ROW(SEGMENT_WAIT,
+    TimerTO_R,
+    resp_machine->rcr < MAX_RCR,
+    { },
+    SEGMENT_WAIT)
+
+ROW(SEGMENT_WAIT,
+    TimerTO_R,
+    resp_machine->rcr == MAX_RCR,
+    { },
+    LISTEN)
+
+ROW(SEGMENT_WAIT,
+    TimerTO_A,
+    resp_machine->aec < AEC_MAX,
+    {
+     resp_machine->aec++;
+     resp_machine->rid = 1;
+
+     debug("wap.wtp", 0, "*******: last_psn=%ld  last_ack_psn=%ld", resp_machine->last_psn, resp_machine->last_ack_psn);
+     if (resp_machine->last_psn >= resp_machine->last_ack_psn) {
+     	wtp_check_segments(resp_machine, resp_machine->last_psn, resp_machine->ttr);
+     }
+
+     start_timer_A(resp_machine);
+
+    },
+    SEGMENT_WAIT)
+
+ROW(SEGMENT_WAIT,
+    TimerTO_A,
+    resp_machine->aec == AEC_MAX,
+    {
+     send_abort(resp_machine, USER, NORESPONSE);
+    },
+    LISTEN)
+
+ROW(SEGMENT_WAIT,
+    TR_Abort_Req,
+    1,
+    {
+     send_abort(resp_machine, USER, event->u.TR_Abort_Req.abort_reason);
+    },
+    LISTEN)
+
+ROW(SEGMENT_WAIT,
+    RcvErrorPDU,
+    1,
+    {
+     send_abort(resp_machine, PROVIDER, PROTOERR);
+    },
+    LISTEN)
+
+ROW(SEGMENT_WAIT,
+    RcvAbort,
+    1,
+    {
+    },
+    LISTEN)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvAck,
+    resp_machine->ttr == 0,
+    {
+     if (event->u.RcvAck.psnr == resp_machine->last_psn) {
+         resp_machine->rcr = 0;
+         resp_machine->rid = 0;
+         resp_machine->last_ack_psn = resp_machine->last_psn;
+
+         wap_event_destroy(resp_machine->result);
+         wtp_send_segm(resp_machine);
+
+         resp_machine->rid = 1;
+
+         start_timer_R(resp_machine);
+     }
+    },
+    SEGMENT_ACK_WAIT)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvAck,
+    resp_machine->ttr == 1 &&
+    event->u.RcvAck.psnr == resp_machine->last_psn,
+    { },
+    LISTEN)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvNack,
+    1,
+    {
+     resp_machine->rcr++;
+     resp_machine->rid = 1;
+
+     wap_event_destroy(resp_machine->result);
+     
+     if (event->u.RcvNack.nmissing == 0) {
+     	wtp_send_segm(resp_machine);
+     } else {
+     	wtp_resend_segm(resp_machine, event);
+     }
+     
+     start_timer_R(resp_machine);
+    },
+    SEGMENT_ACK_WAIT)
+
+ROW(SEGMENT_ACK_WAIT,
+    TimerTO_R,
+    resp_machine->rcr < MAX_RCR,
+    {
+     resp_machine->rcr++;
+     resp_machine->rid = 1;
+     
+     wap_event_destroy(resp_machine->result);
+     wtp_send_segm(resp_machine);
+     
+     /*dispatch_to_wdp(wap_event_duplicate(resp_machine->result));*/
+     
+     start_timer_R(resp_machine);
+    },
+    SEGMENT_ACK_WAIT)
+
+ROW(SEGMENT_ACK_WAIT,
+    TimerTO_R,
+    resp_machine->rcr == MAX_RCR,
+    {
+     send_abort(resp_machine, USER, NORESPONSE);
+     wsp_event = create_tr_abort_ind(resp_machine, NORESPONSE);
+     wsp_session_dispatch_event(wsp_event);
+    },
+    LISTEN)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvResult,
+    1,
+    {
+     WAPEvent *result;
+     resp_machine->rcr = 0;
+     resp_machine->rid = 0;
+
+     start_timer_R(resp_machine);
+     wap_event_destroy(resp_machine->result);
+
+     result = wtp_pack_ack(ACKNOWLEDGEMENT, 0, resp_machine->tid, resp_machine->addr_tuple);
+     resp_machine->result = wap_event_duplicate(result);
+     dispatch_to_wdp(result);
+     resp_machine->rid = 1;
+    },
+    LISTEN)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvAbort,
+    1,
+    {
+     wsp_event = create_tr_abort_ind(resp_machine, event->u.RcvAbort.abort_reason);
+     wsp_session_dispatch_event(wsp_event);
+    },
+    LISTEN)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvInvoke,
+    1,
+    { },
+    SEGMENT_ACK_WAIT)
+
+ROW(SEGMENT_ACK_WAIT,
+    TR_Abort_Req,
+    1,
+    { 
+     send_abort(resp_machine, USER, event->u.TR_Abort_Req.abort_reason);
+    },
+    LISTEN)
+
+ROW(SEGMENT_ACK_WAIT,
+    RcvErrorPDU,
+    1,
+    {},
+    SEGMENT_ACK_WAIT)
 
 #undef ROW
 #undef STATE_NAME
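
Note on the state table above: the SEGMENT_ACK_WAIT rows drive the sending window of wtp_send_segm — interior groups end in a segment with GTR set and wait for an Ack carrying the PSNR TPI, while the final group ends in a segment with TTR set. A standalone walkthrough of that flow with the patch's grp_size of 3, assuming every group is acknowledged without nacks (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        int segm_count = 8, grp_size = 3;
        int last_ack_psn = -1;

        while (last_ack_psn < segm_count - 1) {
            int i = last_ack_psn + 1, max_count, gtr, ttr;

            if (i + grp_size + 1 >= segm_count) {  /* final group */
                ttr = 1; gtr = 0; max_count = segm_count - 1;
            } else {                               /* interior group */
                ttr = 0; gtr = 1; max_count = i + grp_size;
            }
            for (; i < max_count; i++)
                printf("send psn %d gtr=0 ttr=0\n", i);
            printf("send psn %d gtr=%d ttr=%d (trailer)\n", i, gtr, ttr);

            /* Assume the peer acks the whole group with psnr == i. */
            last_ack_psn = i;
        }
        return 0;
    }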
