Hello Sascha,

On 19.06.20 at 09:44, Sascha Hauer wrote:
> +struct fastboot_net {
> +     struct fastboot fastboot;
> +
> +     struct net_connection *net_con;
> +     struct fastboot_header response_header;
> +     struct poller_struct poller;
> +     struct work_queue wq;
> +     u64 host_waits_since;
> +     u64 last_download_pkt;
> +     bool sequence_number_seen;
> +     bool active_download;
> +     bool reinit;
> +     bool send_keep_alive;
> +     enum may_send may_send;
> +
> +     IPaddr_t host_addr;
> +     u16 host_port;
> +     u8 host_mac[ETH_ALEN];
> +     u16 sequence_number;
> +     u16 last_payload_len;
> +     uchar last_payload[FASTBOOT_MAX_CMD_LEN + sizeof(struct fastboot_header)];

This is not FASTBOOT_MAX_CMD_LEN. It's the 64 that is strewn around in
fastboot_tx_print. Adding a new constant FASTBOOT_MAX_MSG_LEN would be
correct.
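
Something like this is what I have in mind (FASTBOOT_MAX_MSG_LEN is just
my suggested name, 64 being the value currently hard-coded in
fastboot_tx_print):

#define FASTBOOT_MAX_MSG_LEN    64

        uchar last_payload[FASTBOOT_MAX_MSG_LEN + sizeof(struct fastboot_header)];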

[...]

> +static int fastboot_write_net(struct fastboot *fb, const char *buf,
> +                           unsigned int n)
> +{
> +     struct fastboot_net *fbn = container_of(fb, struct fastboot_net,
> +                                             fastboot);
> +     struct fastboot_header response_header;
> +     uchar *packet;
> +     uchar *packet_base;
> +     int ret;
> +
> +     if (fbn->reinit)
> +             return 0;
> +
> +     /*
> +      * This function is either called in command context, in which
> +      * case we may wait, or from the keepalive poller which explicitly
> +      * only calls us when we don't have to wait here.
> +      */
> +     ret = fastboot_net_wait_may_send(fbn);
> +     if (ret) {
> +             fastboot_net_abort(fbn);
> +             return ret;
> +     }
> +
> +     if (n && fbn->may_send == MAY_SEND_ACK) {
> +             fastboot_send(fbn, fbn->response_header,
> +                             "Have message but only ACK allowed");
> +             return -EPROTO;
> +     } else if (!n && fbn->may_send == MAY_SEND_MESSAGE) {
> +             fastboot_send(fbn, fbn->response_header,
> +                             "Want to send ACK but message expected");
> +             return -EPROTO;
> +     }
> +
> +     response_header = fbn->response_header;
> +     response_header.flags = 0;
> +     response_header.seq = htons(fbn->sequence_number);
> +     ++fbn->sequence_number;
> +     fbn->sequence_number_seen = false;
> +
> +     packet = net_udp_get_payload(fbn->net_con);
> +     packet_base = packet;
> +
> +     /* Write headers */
> +     memcpy(packet, &response_header, sizeof(response_header));
> +     packet += sizeof(response_header);
> +     /* Write response */
> +     memcpy(packet, buf, n);
> +     packet += n;
> +
> +     /* Save packet for retransmitting */
> +     fbn->last_payload_len = packet - packet_base;
> +     memcpy(fbn->last_payload, packet_base, fbn->last_payload_len);
> +
> +     memcpy(fbn->net_con->et->et_dest, fbn->host_mac, ETH_ALEN);
> +     net_write_ip(&fbn->net_con->ip->daddr, fbn->host_addr);
> +     fbn->net_con->udp->uh_dport = fbn->host_port;
> +     net_udp_send(fbn->net_con, fbn->last_payload_len);
> +
> +     fbn->may_send = MAY_NOT_SEND;

You moved that line below net_udp_send. Is there any risk that

1. our work queue executes a command which calls fastboot_tx_print
2. the net_udp_send caused by that fastboot_tx_print sleeps
3. our poller is executed and decides to send a message because
   may_send is still MAY_SEND_MESSAGE

?
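
If that sequence is possible, claiming the send slot before
net_udp_send() would close the window, i.e. something like (untested):

        /* Set before net_udp_send(), which may sleep, so that the
         * keep-alive poller cannot decide to send in the meantime. */
        fbn->may_send = MAY_NOT_SEND;

        memcpy(fbn->net_con->et->et_dest, fbn->host_mac, ETH_ALEN);
        net_write_ip(&fbn->net_con->ip->daddr, fbn->host_addr);
        fbn->net_con->udp->uh_dport = fbn->host_port;
        net_udp_send(fbn->net_con, fbn->last_payload_len);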

[...]

> +static void fastboot_start_download_net(struct fastboot *fb)
> +{
> +     struct fastboot_net *fbn = container_of(fb, struct fastboot_net,
> +                                             fastboot);
> +
> +     fastboot_start_download_generic(fb);
> +     fbn->active_download = true;
> +     fbn->last_download_pkt = get_time_ns();
> +}

Although you added that last_download_pkt timeout check to the poller,
there is still a risk that we never close download_fd: if
fastboot_net_abort is called (e.g. by the first fastboot_tx_print
inside cb_download) before download_fd is opened, no poller is left
to check for the timeout.
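
One way around that might be to check for an earlier abort in
fastboot_start_download_net itself. A rough sketch; I am assuming here
that fastboot_net_abort sets fbn->reinit and that download_fd is the fd
member of struct fastboot, so adjust to whatever the generic code
actually uses:

        fastboot_start_download_generic(fb);

        if (fbn->reinit) {
                /* We were already aborted, e.g. by a failed
                 * fastboot_tx_print in cb_download. No poller is left
                 * to time this download out, so drop it right away. */
                if (fb->download_fd >= 0) {
                        close(fb->download_fd);
                        fb->download_fd = -1;
                }
                return;
        }

        fbn->active_download = true;
        fbn->last_download_pkt = get_time_ns();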

[...]

> +static void fastboot_handle_type_fastboot(struct fastboot_net *fbn,
> +                                       struct fastboot_header header,
> +                                       char *fastboot_data,
> +                                       unsigned int fastboot_data_len)
> +{
> +     struct fastboot_work *w;
> +
> +     fbn->response_header = header;
> +     fbn->host_waits_since = get_time_ns();
> +     fbn->may_send = fastboot_data_len ? MAY_SEND_ACK : MAY_SEND_MESSAGE;
> +
> +     if (fbn->active_download) {
> +             fbn->last_download_pkt = get_time_ns();
> +
> +             if (!fastboot_data_len && fbn->fastboot.download_bytes
> +                                        == fbn->fastboot.download_size) {
> +
> +                     fbn->active_download = false;
> +
> +                     w = xzalloc(sizeof(*w));
> +                     w->fbn = fbn;
> +                     w->download_finished = true;
> +
> +                     wq_queue_work(&fbn->wq, &w->work);
> +             } else {
> +                     fastboot_data_download(fbn, fastboot_data,
> +                                            fastboot_data_len);
> +             }
> +             return;
> +     }
> +
> +     if (fastboot_data_len >= FASTBOOT_MAX_CMD_LEN) {

Still off-by-one. Replace >= with >
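
i.e., so that a command of exactly FASTBOOT_MAX_CMD_LEN bytes is still
accepted:

        if (fastboot_data_len > FASTBOOT_MAX_CMD_LEN) {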

[...]

> +     case FASTBOOT_INIT:
> +             if (ntohs(header.seq) != fbn->sequence_number) {
> +                     fastboot_check_retransmit(fbn, header);
> +                     break;
> +             }
> +             fbn->host_addr = net_read_ip(&ip_header->saddr);
> +             fbn->host_port = udp_header->uh_sport;
> +             memcpy(fbn->host_mac, eth_header->et_src, ETH_ALEN);
> +             fastboot_net_abort(fbn);
> +             ret = poller_register(&fbn->poller, "fastboot");
> +             if (ret) {
> +                     pr_err("Cannot register poller: %s\n", strerror(-ret));
> +                     return;

It is not obvious that a second FASTBOOT_INIT will _not_ cause this
error; the reason it doesn't is that fastboot_net_abort has already
unregistered the previous poller. I would at least add a comment to
the fastboot_net_abort(fbn) line.
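
Something along these lines:

        /*
         * This also unregisters the poller left over from a previous
         * FASTBOOT_INIT, so the poller_register() below will not fail
         * when the host re-initializes.
         */
        fastboot_net_abort(fbn);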

[...]

> +static void fastboot_poll(struct poller_struct *poller)
> +{
> +     struct fastboot_net *fbn = container_of(poller, struct fastboot_net,
> +                                            poller);
> +
> +     if (fbn->active_download && is_timeout(fbn->last_download_pkt, 5 * SECOND)) {

Should pollers prefer is_timeout_non_interruptible over is_timeout?
The latter ends up calling poller_call, which seems questionable from
inside a poller.
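
If so, the check would simply become:

        if (fbn->active_download &&
            is_timeout_non_interruptible(fbn->last_download_pkt, 5 * SECOND)) {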

I can prepare a new patch set with all of these issues fixed, unless
you would rather do it yourself.

Best regards,

  Daniel


-- 
Dipl.-Math. Daniel Glöckner, emlix GmbH, http://www.emlix.com
Fon +49 551 30664-0, Fax +49 551 30664-11,
Gothaer Platz 3, 37083 Göttingen, Germany
Sitz der Gesellschaft: Göttingen, Amtsgericht Göttingen HR B 3160
Geschäftsführung: Heike Jordan, Dr. Uwe Kracke
Ust-IdNr.: DE 205 198 055

emlix - your embedded linux partner

