[PATCH RFC/RFT] ath10k: use xmit encapsulation offloading

2022-05-10 Thread Edward Matijevic
Tested on a TP-Link Archer C2600 (IPQ8064 + QCA9980), using official
22.03-rc1 and your patch on top of 22.03
Load average peaks were still the same, but with the offloading the 1-minute
average wasn't changing as sharply as without the offload.
My download speed was a bit more stable with the offload: tested with
a game download from Steam (20+ GB), it maintained over 40 MB/s, where
without the offload it would only sometimes peak over 40.
Bufferbloat latency testing (from Waveform) wasn't noticeably
different (maybe a slight improvement to minimums), with SQM disabled in
both cases.

No obvious regressions, but I haven't run the patch long enough yet to see issues.



Re: [PATCH V3] tplink-safeloader: Patch to handle partitions with alternate names

2022-05-10 Thread Sander Vanheule
Hi Ole Kristian,

Could you CC Rafał and me (and other reviewers) on revised patches? At least we
should get the patch without DMARC wrapping in that case.

On Thu, 2022-05-05 at 17:41 +0200, Ole Kristian Lona wrote:
>     Some devices, specifically Deco M4R-v3 / M5, have partition names that
>     deviate from the scheme other devices use. They have an added "@0"
>     and "@1" for some partition names. The devices have
>     fallback partitions which will be used in case the device
>     determines that the primary partition set is unbootable.
> 
>     This patch introduces an option to set these alternate partition
>     names in the device definition of tplink-safeloader.
> 
>     Signed-off-by: Ole Kristian Lona 
> ---
>  src/tplink-safeloader.c | 58 -
>  1 file changed, 45 insertions(+), 13 deletions(-)
> 
> diff --git a/src/tplink-safeloader.c b/src/tplink-safeloader.c
> index 1c08a33..2adcaf6 100644
> --- a/src/tplink-safeloader.c
> +++ b/src/tplink-safeloader.c
> @@ -48,11 +48,20 @@ struct image_partition_entry {
>  
>  /** A flash partition table entry */
>  struct flash_partition_entry {
> -   char *name;
> +   const char *name;
> uint32_t base;
> uint32_t size;
>  };
>  
> +/** Flash partition names table entry */
> +struct factory_partition_names {
> +   const char *partition_table;
> +   const char *soft_ver;
> +   const char *os_image;
> +   const char *file_system;
> +   const char *extra_para;
> +};
> +
>  /** Partition trailing padding definitions
>   * Values 0x00 to 0xff are reserved to indicate the padding value
>   * Values from 0x100 are reserved to indicate other behaviour */
> @@ -89,6 +98,7 @@ struct device_info {
> struct flash_partition_entry partitions[MAX_PARTITIONS+1];
> const char *first_sysupgrade_partition;
> const char *last_sysupgrade_partition;
> +   struct factory_partition_names partition_names;
>  };
>  
>  #define SOFT_VER_TEXT(_t) {.type = SOFT_VER_TYPE_TEXT, .text = _t}
> @@ -2962,6 +2972,21 @@ static struct image_partition_entry alloc_image_partition(const char *name, size
> return entry;
>  }
>  
> +/** Sets up default partition names whenever custom names aren't specified */
> +static void set_partition_names(struct device_info *info)
> +{
> +   if (!info->partition_names.partition_table)
> +   info->partition_names.partition_table = "partition-table";
> +   if (!info->partition_names.soft_ver)
> +   info->partition_names.soft_ver = "soft-version";
> +   if (!info->partition_names.os_image)
> +   info->partition_names.os_image = "os-image";
> +   if (!info->partition_names.file_system)
> +   info->partition_names.file_system = "file-system";
> +   if (!info->partition_names.extra_para)
> +   info->partition_names.extra_para = "extra-para";
> +}
> +
>  /** Frees an image partition */
>  static void free_image_partition(struct image_partition_entry entry) {
> free(entry.data);
> @@ -2982,8 +3007,10 @@ static void set_source_date_epoch() {
>  }
>  
>  /** Generates the partition-table partition */
> -static struct image_partition_entry make_partition_table(const struct flash_partition_entry *p) {
> -   struct image_partition_entry entry = alloc_image_partition("partition-table", 0x800);
> +static struct image_partition_entry make_partition_table(const char *name,
> +   const struct flash_partition_entry *p)
> +{
> +   struct image_partition_entry entry = alloc_image_partition(name, 0x800);

If you pass a device_info reference instead of the flash_partition_entry list,
you can draw all the required information from that struct and don't need to
pass an additional string. make_soft_version(), make_support_list(), and
make_extra_para() already take a struct device_info * argument.
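
Something along these lines (untested sketch, using the partition_names
member this patch adds to struct device_info):

static struct image_partition_entry
make_partition_table(const struct device_info *info,
                     const struct flash_partition_entry *p)
{
	struct image_partition_entry entry =
		alloc_image_partition(info->partition_names.partition_table, 0x800);

	/* ... body unchanged ... */
	return entry;
}

The same approach would work for the other make_*() helpers below.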

>  
> char *s = (char *)entry.data, *end = (char *)(s+entry.size);
>  
> @@ -3018,14 +3045,14 @@ static inline uint8_t bcd(uint8_t v) {
>  
>  
>  /** Generates the soft-version partition */
> -static struct image_partition_entry make_soft_version(
> +static struct image_partition_entry make_soft_version(const char *name,
> const struct device_info *info, uint32_t rev)
>  {
> /** If an info string is provided, use this instead of
>  * the structured data, and include the null-termination */
> if (info->soft_ver.type == SOFT_VER_TYPE_TEXT) {
> uint32_t len = strlen(info->soft_ver.text) + 1;
> -   return init_meta_partition_entry("soft-version",
> +   return init_meta_partition_entry(name,

You should be able to draw the partition name from device_info, which you've
updated to contain the required value(s).

> info->soft_ver.text, len, info->part_trail);
> }
>  
> @@ -3055,11 +3082,11 @@ static struct image_partition_entry make_soft_version(
> 

Re: Optimizing kernel compilation / alignments for network performance

2022-05-10 Thread Rafał Miłecki

On 6.05.2022 11:44, Arnd Bergmann wrote:

On Fri, May 6, 2022 at 10:55 AM Rafał Miłecki  wrote:

On 6.05.2022 10:45, Arnd Bergmann wrote:

On Fri, May 6, 2022 at 9:44 AM Rafał Miłecki  wrote:

With
echo 1 > /sys/class/net/eth0/queues/rx-0/rps_cpus
my NAT speeds were jumping between 2 speeds:
284 Mbps / 408 Mbps


Can you try using 'numactl -C' to pin the iperf processes to
a particular CPU core? This may be related to the locality of
the user process relative to where the interrupts end up.


I run iperf on x86 machines connected to the router's WAN and LAN ports.
It's meant to emulate an end user just downloading data from / uploading
data to the Internet.

The router's only task here is doing masquerade NAT.


Ah, makes sense. Can you observe the CPU usage to be on
a particular core in the slow vs fast case then?


With echo 0 > /sys/class/net/eth0/queues/rx-0/rps_cpus
NAT speed was varying between:
a) 311 Mb/s (CPUs load: 100% + 0%)
b) 408 Mb/s (CPUs load: 100% + 62%)

With echo 1 > /sys/class/net/eth0/queues/rx-0/rps_cpus
NAT speed was varying between:
a) 290 Mb/s (CPUs load: 100% + 0%)
b) 410 Mb/s (CPUs load: 100% + 63%)

With echo 2 > /sys/class/net/eth0/queues/rx-0/rps_cpus
NAT speed was stable:
a) 372 Mb/s (CPUs load: 100% + 26%)
b) 375 Mb/s (CPUs load: 82% + 100%)

With echo 3 > /sys/class/net/eth0/queues/rx-0/rps_cpus
NAT speed was varying between:
a) 293 Mb/s (CPUs load: 100% + 0%)
b) 332 Mb/s (CPUs load: 100% + 17%)
c) 374 Mb/s (CPUs load: 81% + 100%)
d) 442 Mb/s (CPUs load: 100% + 75%)



After some extra debugging I found a reason for varying CPU usage &
varying NAT speeds.

My router has a single switch, so I use two VLANs:
eth0.1 - LAN
eth0.2 - WAN
(VLAN traffic is routed to the correct ports by the switch). On top of that I
have a "br-lan" bridge interface bridging eth0.1 and the wireless interfaces.

For all that time I had /sys/class/net/br-lan/queues/rx-0/rps_cpus set
to 3. So bridge traffic was randomly handled by CPU 0 or CPU 1.

So if I assign a specific CPU core to each of the two interfaces (rps_cpus is
a CPU bitmask, so 1 = CPU 0 and 2 = CPU 1), e.g.:
echo 1 > /sys/class/net/eth0/queues/rx-0/rps_cpus
echo 2 > /sys/class/net/br-lan/queues/rx-0/rps_cpus
things get stable.

With the above I get a stable 419 Mb/s (CPUs load: 100% + 64%) on every iperf
session.



[PATCH] umbim: fix invalid mbim message string encoding

2022-05-10 Thread Daniel Danzberger
Strings in MBIM messages have to follow these formatting rules:
 - 4-byte alignment, padded if not aligned.
 - UTF-16 little endian.

Fixes:
 - mbim connect fails with more than one string parameter (apn/user/pass)
   when they are not 4-byte aligned.

Signed-off-by: Daniel Danzberger 
---
 mbim-msg.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/mbim-msg.c b/mbim-msg.c
index 5ec04f4..8f21aa9 100644
--- a/mbim-msg.c
+++ b/mbim-msg.c
@@ -53,8 +53,10 @@ mbim_add_payload(uint8_t len)
 int
 mbim_encode_string(struct mbim_string *str, char *in)
 {
-   int l = strlen(in);
-   int s = mbim_add_payload(l * 2);
+   const int l = strlen(in);
+   const int utf16_len = l * 2;
+   const int pad_len = utf16_len % 4;
+   const int s = mbim_add_payload(utf16_len + pad_len);
uint8_t *p = _buffer[s];
int i;
 
@@ -62,14 +64,14 @@ mbim_encode_string(struct mbim_string *str, char *in)
return -1;
 
str->offset = htole32(s);
-   str->length = htole32(l * 2);
+   str->length = htole32(utf16_len);
+
for (i = 0; i < l; i++)
p[i * 2] = in[i];
 
return 0;
 }
 
-
 char *
 mbim_get_string(struct mbim_string *str, char *in)
 {
-- 
2.35.1




Re: Optimizing kernel compilation / alignments for network performance

2022-05-10 Thread Rafał Miłecki

On 6.05.2022 10:45, Arnd Bergmann wrote:

On Fri, May 6, 2022 at 9:44 AM Rafał Miłecki  wrote:


On 5.05.2022 18:04, Andrew Lunn wrote:

you'll see that most used functions are:
v7_dma_inv_range
__irqentry_text_end
l2c210_inv_range
v7_dma_clean_range
bcma_host_soc_read32
__netif_receive_skb_core
arch_cpu_idle
l2c210_clean_range
fib_table_lookup


There is a lot of cache management functions here.


Indeed, so optimizing the coherency management (see Felix' reply)
is likely to help most in making the driver faster, but that does not
explain why the alignment of the object code has such a big impact
on performance.

To investigate the alignment further, what I was actually looking for
is a comparison of the profile of the slow and fast case. Here I would
expect that the slow case spends more time in one of the functions
that don't deal with cache management (maybe fib_table_lookup or
__netif_receive_skb_core).

A few other thoughts:

- bcma_host_soc_read32() is a fundamentally slow operation, maybe
   some of the calls can be turned into a relaxed read, like the readback
   in bgmac_chip_intrs_off() or the 'poll again' at the end of bgmac_poll(),
   though obviously not the one in bgmac_dma_rx_read().
   It may be possible to even avoid some of the reads entirely; checking
   for more data in bgmac_poll() may actually be counterproductive
   depending on the workload.


I'll experiment with that, hopefully I can optimize it a bit.



- The higher-end networking SoCs are usually cache-coherent and
   can avoid the cache management entirely. There is a slim chance
   that this chip is designed that way and it just needs to be enabled
   properly. Most low-end chips don't implement the coherent
   interconnect though, and I suppose you have checked this already.


To the best of my knowledge the Northstar platform doesn't support hw coherency.

I just took an extra look at Broadcom's SDK and they seem to have some
driver for selected chipsets, but BCM708 isn't there.

config BCM_GLB_COHERENCY
bool "Global Hardware Cache Coherency"
default n
depends on BCM963158 || BCM96846 || BCM96858 || BCM96856 || BCM963178 
|| BCM947622 || BCM963146  || BCM94912 || BCM96813 || BCM96756 || BCM96855



- bgmac_dma_rx_update_index() and bgmac_dma_tx_add() appear
   to have an extraneous dma_wmb(), which should be implied by the
   non-relaxed writel() in bgmac_write().


I tried dropping wmb() calls.
With wmb(): 421 Mb/s
Without: 418 Mb/s


I also tried dropping bgmac_read() from bgmac_chip_intrs_off() which
seems to be a flushing readback.

With bgmac_read(): 421 Mb/s
Without: 413 Mb/s



- accesses to the DMA descriptor don't show up in the profile here,
   but look like they can get misoptimized by the compiler. I would
   generally use READ_ONCE() and WRITE_ONCE() for these to
   ensure that you don't end up with extra or out-of-order accesses.
   This also makes it clearer to the reader that something special
   happens here.


Should I use something like below?

FWIW it doesn't seem to change NAT performance.
Without WRITE_ONCE: 421 Mb/s
With: 419 Mb/s


diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 87700072..ce98f2a9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -119,10 +119,10 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,

slot = &ring->slots[i];
dma_desc = &ring->cpu_base[i];
-   dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
-   dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
-   dma_desc->ctl0 = cpu_to_le32(ctl0);
-   dma_desc->ctl1 = cpu_to_le32(ctl1);
+   WRITE_ONCE(dma_desc->addr_low, cpu_to_le32(lower_32_bits(slot->dma_addr)));
+   WRITE_ONCE(dma_desc->addr_high, cpu_to_le32(upper_32_bits(slot->dma_addr)));
+   WRITE_ONCE(dma_desc->ctl0, cpu_to_le32(ctl0));
+   WRITE_ONCE(dma_desc->ctl1, cpu_to_le32(ctl1));
 }

 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
@@ -387,10 +387,10 @@ static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
 * B43_DMA64_DCTL1_ADDREXT_MASK;
 */

-   dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
-   dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
-   dma_desc->ctl0 = cpu_to_le32(ctl0);
-   dma_desc->ctl1 = cpu_to_le32(ctl1);
+   WRITE_ONCE(dma_desc->addr_low, cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr)));
+   WRITE_ONCE(dma_desc->addr_high, cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)));
+   WRITE_ONCE(dma_desc->ctl0, cpu_to_le32(ctl0));
+   WRITE_ONCE(dma_desc->ctl1, cpu_to_le32(ctl1));

ring->end = desc_idx;
 }



Re: [PATCH] umbim: fix invalid mbim message string encoding

2022-05-10 Thread Daniel Danzberger
On Tue, 2022-05-10 at 12:57 +0200, Bjørn Mork wrote:
>     Unless otherwise specified, all strings use UNICODE UTF-16LE
>     encodings limited to characters from the Basic Multilingual
>     Plane. Strings shall not be terminated by a NULL character.
> 
> > +   /* convert to utf-16 little endian */
> > for (i = 0; i < l; i++)
> > -   p[i * 2] = in[i];
> > +   p[i * 2] = htole16(in[i]);
> >  
> > return 0;
> >  }
> 
> 
> This new code is buggy.  It fails on BE and makes no difference on
> LE. Both p and in are byte arrays. The byte you write into p on a BE
> system will be either 0x00 or 0xff depending on the MSB of in[i].

True, I will remove that le16 conversion part and retest.
> 
> The previous code was sort of correct, assuming that the input is mostly
> ascii mapping to the same value in unicode. Ideally we should do real
> utf16le conversion of the input. But then someone has to decide if the
> input is utf8 or something else first.  And you need a real utf16
> implementation.  Not sure it's worth it.  Did you ever see an operator
> use a non-ascii APN, username, password or pin code?
Agreed, it's best for now to just leave it as is.

-- 
Regards

Daniel Danzberger
embeDD GmbH, Alter Postplatz 2, CH-6370 Stans



Re: [PATCH] umbim: fix invalid mbim message string encoding

2022-05-10 Thread Bjørn Mork
Daniel Danzberger  writes:

> Strings in mbim messages have to follow these formatting rules:
>  - 4 byte alignment, padded if not.

ack. MBIM spec section 10.3:

All fields (either static or variable size) start at a 4-byte
boundary. Hence, all field offsets shall be multiples of 4. Between
the end of a field (not necessarily at a 4-byte boundary) and the
beginning of the next field (always at a 4-byte boundary), padding
may be required. Padding bytes must be set to 0 upon transmission
and ignored upon reception.
..
If the size of the payload in the variable field is not a multiple
of 4 bytes, the field shall be padded up to the next 4 byte
multiple. This shall be true even for the last payload in
DataBuffer.
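
Side note on the arithmetic: utf16_len is always even here (strlen() * 2),
so "utf16_len % 4" does yield the required 0 or 2 padding bytes. The more
general round-up idiom would be something like (untested):

	const int padded_len = (utf16_len + 3) & ~3;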

>  - utf-16 little endian.

ack. MBIM spec section 10.4:

Unless otherwise specified, all strings use UNICODE UTF-16LE
encodings limited to characters from the Basic Multilingual
Plane. Strings shall not be terminated by a NULL character.

> + /* convert to utf-16 little endian */
>   for (i = 0; i < l; i++)
> - p[i * 2] = in[i];
> + p[i * 2] = htole16(in[i]);
>  
>   return 0;
>  }


This new code is buggy.  It fails on BE and makes no difference on
LE. Both p and in are byte arrays. The byte you write into p on a BE
system will be either 0x00 or 0xff depending on the MSB of in[i].

The previous code was sort of correct, assuming that the input is mostly
ascii mapping to the same value in unicode. Ideally we should do real
utf16le conversion of the input. But then someone has to decide if the
input is utf8 or something else first.  And you need a real utf16
implementation.  Not sure it's worth it.  Did you ever see an operator
use a non-ascii APN, username, password or pin code?
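
If you just want something that is endian-safe without a full conversion,
writing the two bytes of each code unit explicitly would do it (untested
sketch, still assuming plain ASCII/Latin-1 input):

	for (i = 0; i < l; i++) {
		uint16_t c = (unsigned char)in[i];

		/* UTF-16LE: low byte first, high byte (0 for ASCII) second */
		p[i * 2] = c & 0xff;
		p[i * 2 + 1] = c >> 8;
	}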



Bjørn



Re: Optimizing kernel compilation / alignments for network performance

2022-05-10 Thread Rafał Miłecki

On 6.05.2022 14:42, Andrew Lunn wrote:

I just took a quick look at the driver. It allocates and maps rx buffers that 
can cover a packet size of BGMAC_RX_MAX_FRAME_SIZE = 9724.
This seems rather excessive, especially since most people are going to use an
MTU of 1500.
My proposal would be to add support for making rx buffer size dependent on MTU, 
reallocating the ring on MTU changes.
This should significantly reduce the time spent on flushing caches.


Oh, that's important too, it was changed by commit 8c7da63978f1 ("bgmac:
configure MTU and add support for frames beyond 8192 byte size"):
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8c7da63978f1672eb4037bbca6e7eac73f908f03

It lowered NAT speed with bgmac by 60% (362 Mbps → 140 Mbps).

I do all my testing with
#define BGMAC_RX_MAX_FRAME_SIZE 1536


That helps show that cache operations are part of your bottleneck.

Taking a quick look at the driver. On the receive side:

/* Unmap buffer to make it accessible to the CPU */
 dma_unmap_single(dma_dev, dma_addr,
  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

Here the data is mapped, ready for the CPU to use it.

/* Get info from the header */
 len = le16_to_cpu(rx->len);
 flags = le16_to_cpu(rx->flags);

 /* Check for poison and drop or pass the packet */
 if (len == 0xdead && flags == 0xbeef) {
 netdev_err(bgmac->net_dev, "Found poisoned packet 
at slot %d, DMA issue!\n",
ring->start);
 put_page(virt_to_head_page(buf));
 bgmac->net_dev->stats.rx_errors++;
 break;
 }

 if (len > BGMAC_RX_ALLOC_SIZE) {
 netdev_err(bgmac->net_dev, "Found oversized packet 
at slot %d, DMA issue!\n",
ring->start);
 put_page(virt_to_head_page(buf));
 bgmac->net_dev->stats.rx_length_errors++;
 bgmac->net_dev->stats.rx_errors++;
 break;
 }

 /* Omit CRC. */
 len -= ETH_FCS_LEN;

 skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
 if (unlikely(!skb)) {
 netdev_err(bgmac->net_dev, "build_skb 
failed\n");
 put_page(virt_to_head_page(buf));
 bgmac->net_dev->stats.rx_errors++;
 break;
 }
 skb_put(skb, BGMAC_RX_FRAME_OFFSET +
 BGMAC_RX_BUF_OFFSET + len);
 skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
  BGMAC_RX_BUF_OFFSET);

 skb_checksum_none_assert(skb);
 skb->protocol = eth_type_trans(skb, bgmac->net_dev);

and this is the first access of the actual data. You can make the
cache actually work for you, rather than against you, by adding a call to

prefetch(buf);

just after the dma_unmap_single(). That will start getting the frame
header from DRAM into cache, so hopefully it is available by the time
eth_type_trans() is called and you don't have a cache miss.
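
i.e. roughly (untested sketch):

	/* Unmap buffer to make it accessible to the CPU */
	dma_unmap_single(dma_dev, dma_addr,
			 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

	/* start pulling the frame header into the cache early */
	prefetch(buf);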



I don't think that analysis is correct.

Please take a look at following lines:
struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
void *buf = slot->buf;

The first thing we do after the dma_unmap_single() call is the rx->len read.
That actually points to DMA data. There is nothing we could keep the CPU busy
with while prefetching data.

FWIW I tried adding prefetch(buf); anyway. It didn't change NAT speed by
a single Mb/s. Speed was exactly the same as without the prefetch() call.



[PATCH] umbim: fix invalid mbim message string encoding

2022-05-10 Thread Daniel Danzberger
Strings in MBIM messages have to follow these formatting rules:
 - 4-byte alignment, padded if not aligned.
 - UTF-16 little endian.

Fixes:
 - mbim connect fails with more than one string parameter (apn/user/pass)
   when they are not 4-byte aligned.

Signed-off-by: Daniel Danzberger 
---
 mbim-msg.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/mbim-msg.c b/mbim-msg.c
index 5ec04f4..8465091 100644
--- a/mbim-msg.c
+++ b/mbim-msg.c
@@ -53,8 +53,10 @@ mbim_add_payload(uint8_t len)
 int
 mbim_encode_string(struct mbim_string *str, char *in)
 {
-   int l = strlen(in);
-   int s = mbim_add_payload(l * 2);
+   const int l = strlen(in);
+   const int utf16_len = l * 2;
+   const int pad_len = utf16_len % 4;
+   const int s = mbim_add_payload(utf16_len + pad_len);
uint8_t *p = _buffer[s];
int i;
 
@@ -62,14 +64,15 @@ mbim_encode_string(struct mbim_string *str, char *in)
return -1;
 
str->offset = htole32(s);
-   str->length = htole32(l * 2);
+   str->length = htole32(utf16_len);
+
+   /* convert to utf-16 little endian */
for (i = 0; i < l; i++)
-   p[i * 2] = in[i];
+   p[i * 2] = htole16(in[i]);
 
return 0;
 }
 
-
 char *
 mbim_get_string(struct mbim_string *str, char *in)
 {
-- 
2.35.1




Re: [PATCH 1/4] uclient-fetch: --header option to pass additional raw HTTP headers

2022-05-10 Thread Jo-Philipp Wich
Hi Sergey,

a minor nitpick inline below.

On 5/9/22 11:59 PM, Sergey Ponomarev wrote:
> You can add custom HTTP header(s) to a request:
> 
> wget --header='Authorization: Bearer TOKEN' \
> --header='If-Modified-Since: Wed, 9 May 2021 12:16:00 GMT' \
> https://example.com/
> 
> Some headers like Authorization or User-Agent may already be set by
> --password or --user-agent.
> We could override them, but that would mostly be protecting the user from
> themselves. To keep the code concise, that logic is omitted.
> 
> Signed-off-by: Sergey Ponomarev 
> ---
> [...]
> + case L_HEADER:
> + if (!raw_headers) {
> + /* Max possible count of headers is the count of args (argc) - 2
> +  Since the first arg is program and last is a URL.
> +  But user may forget the URL and raw_headers is null terminated so allocate argc */
> + raw_headers = calloc(argc, sizeof(char *));

Please handle a possible calloc() error here. Maybe consider introducing an
"xalloc()" or similar helper which wraps calloc() and invokes abort() on NULL
return.
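
For example something along these lines (just a sketch; name and placement up
to you, assumes stdio.h/stdlib.h are already included):

static void *xcalloc(size_t nmemb, size_t size)
{
	void *ptr = calloc(nmemb, size);

	if (!ptr) {
		fprintf(stderr, "xcalloc: out of memory\n");
		abort();
	}

	return ptr;
}

and then:

	raw_headers = xcalloc(argc, sizeof(char *));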

> + }
> + raw_headers[raw_headers_count] = optarg;
> + raw_headers_count++;
> + break;
>   case L_POST_DATA:
>   post_data = optarg;
>   break;
> [...]


