Hello once more. Please follow the link
to see a well-formatted version of this letter (2nd message).

Another implementation via the **raw** interface looks more complicated, which is
why I like it less. But it works flawlessly. Here is the code:

``` C
 * @brief  Processing an incoming message on the Event port.
 * @param  arg the user argument
 * @param  pcb the udp_pcb that has received the data
 * @param  p the packet buffer
 * @param  addr the addres of sender
 * @param  port the port number of sender
static void vmp_RecvCallback( void *arg, struct udp_pcb *pcb, struct pbuf *pbuf,
struct ip_addr *addr, u16_t port)
// Собрать пакеты в один встроенной функцией
pbuf = pbuf_coalesce(pbuf, PBUF_RAW);

vmp_Header_t *h = pbuf->payload;
if( h->variant=='E' || h->variant=='D' )  //&& pbuf->len >= 
if( vmp_MulticastListenerViaRaw_queue )
xQueueSend(vmp_MulticastListenerViaRaw_queue, pbuf->payload, 0);


 * Listening task
void vmp_MulticastListenerViaRaw_task(void* dummy)
(void)dummy; //to suppress warnings about not used
struct udp_pcb *pcb;
err_t err;
union {
vmp_Header_t   *h;
vmp_IamAlive_t *a;
vmp_Event_t    *e;
//vmp_Waveform_t *w;
} vmp;
vmp.e = malloc(sizeof(vmp_Event_t)); assert_amsg(vmp.e!=NULL);
// Check if this singleton task already created
if( vmp_MulticastListenerViaRaw_task_Handle != NULL ){
debugf("%s: vmp_RecvCallback_task already created\n", __func__); 
goto exit_task;
vmp_MulticastListenerViaRaw_task_Handle = xTaskGetCurrentTaskHandle();
vmp_MulticastListenerViaRaw_queue = xQueueCreate(5, sizeof(vmp_Event_t));
// create a new pcb for the server.
pcb = udp_new();
if( pcb == NULL ){
debugf("%s: Cannot create udp_pcb\n", __func__);
goto fail01;
// bind the listening port. Actually opening the socket for listening.
err = udp_bind(pcb, IP_ADDR_ANY, MULTI_SENDER_PORT);
if( err != ERR_OK ){
debugf("%s: Cannot bind udp_pcb\n", __func__);
goto fail02;
// hook the recv function to the new pcb
udp_recv(pcb, vmp_RecvCallback, NULL);
// Init General multicast IP address
ip_addr_t multicastGroup; 
multicastGroup.addr = ipaddr_addr(MULTI_SENDER_ADDRESS);  
// Join multicast group (for receiving) on default interface
igmp_joingroup( &netif_default->ip_addr, &multicastGroup );
xQueueReceive( vmp_MulticastListenerViaRaw_queue, vmp.e, portMAX_DELAY );
// vmp contains received packet. do weyw with it

for(;;) vTaskDelete(NULL);
On 2016-10-14 17:29:19, Ivan Kuvaldin <i....@ya.ru> wrote:
Hello. Please look for a formatted version of this message on 
I have met yet another strange problem. And cannot debug it.
The code at the end of the message works fine until something goes wrong in
`recv_udp()` and the program falls into `HardFault_Handler()`, possibly because of
Let's consider stack trace:


The data flow is really big, but stack size of tcpip_thread is big too, 4096 
bytes. Please help me to debug mistakes.

* Socket based multicast listener
void vmp_MulticastListener_task( void *args )
int sock;
struct sockaddr_in address={0};//, mcast_addr;
struct ip_mreq mreq; // multicast request
union {
vmp_Header_t *h;
vmp_IamAlive_t *a;
vmp_Event_t *e;
vmp_Waveform_t *w;
} vmp;
const uint16_t vmp_MAX_SIZE = sizeof(vmp_Waveform_t);

vmp.w = malloc(vmp_MAX_SIZE);

sock = socket(AF_INET, SOCK_DGRAM, 0);
if( sock < 0="">
debugf("%s: Cannot create socket", __func__);
goto stopTask;
// SO_REUSEADDR mainly changes the way how wildcard addresses ("any IP 
address") are treated when searching for conflicts.
// Without SO_REUSEADDR, binding socketA to and then binding socketB 
to will fail
// (with error EADDRINUSE), since means "any local IP address", thus 
all local IP addresses are considered
// in use by this socket and this includes, too. With SO_REUSEADDR 
it will succeed, since and
// are not exactly the same address, one is a wildcard for all 
local addresses and the other one is a
// very specific local address. Note that the statement above is true 
regardless in which order socketA and socketB
// are bound; without SO_REUSEADDR it will always fail, with SO_REUSEADDR it 
will always succeed.
const int optval_true = 1;
setsockopt( sock, SOL_SOCKET, SO_REUSEADDR, &optval_true, sizeof(optval_true) );

// Bind to port MULTI_SENDER_PORT at any interface.
address.sin_family = AF_INET;
address.sin_addr.s_addr = htonl(INADDR_ANY); //addr.sin_addr.s_addr = 
address.sin_port = htons(MULTI_SENDER_PORT);
if( bind(sock, (struct sockaddr*) &address, sizeof(address)) < 0="">
debugf("%s: cannot bind socket", __func__);
goto stopTask;

// Join to the multicast group
mreq.imr_multiaddr.s_addr = inet_addr(MULTI_SENDER_ADDRESS); 
mreq.imr_interface.s_addr = htonl(INADDR_ANY); //inet_addr("192.168.x.x"); 
if( setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0="">
debugf("%s: cannot join multicast", __func__);
goto stopTask;

struct sockaddr_in addrfrom = {0};
socklen_t addrfrom_len = sizeof(addrfrom);
int len = recvfrom(sock, vmp.w, vmp_MAX_SIZE, 0, (struct sockaddr*)&addrfrom, 
if( len>sizeof(vmp_Header_t) ){
debugf2("%c recved from %08x:%s:%s\n", vmp.h->variant, vmp.h->uid32, 
vmp.h->role, ipaddr_ntoa((ip_addr_t*)&addrfrom.sin_addr) );
if( vmp.h->variant == 'E' ){
Role_t role = vmp_parseRole(vmp.h->role);
if( vmp_CurrentIsSlaveOfMaster(&role) ){
extern const volatile QueueHandle_t indication_queue;
xQueueOverwrite(indication_queue, vmp.e );

Best regards
Ivan Kuvaldin

lwip-users mailing list
lwip-users mailing list

Reply via email to