Hi,

On 1/23/2026 11:45 PM, Shuai Xue wrote:
> The AER driver has historically avoided reading the configuration space of
> an endpoint or RCiEP that reported a fatal error, considering the link to
> that device unreliable. Consequently, when a fatal error occurs, the AER
> and DPC drivers do not report specific error types, resulting in logs like:
> 
>   pcieport 0015:00:00.0: EDR: EDR event received
>   pcieport 0015:00:00.0: EDR: Reported EDR dev: 0015:00:00.0
>   pcieport 0015:00:00.0: DPC: containment event, status:0x200d, ERR_FATAL received from 0015:01:00.0
>   pcieport 0015:00:00.0: AER: broadcast error_detected message
>   pcieport 0015:00:00.0: AER: broadcast mmio_enabled message
>   pcieport 0015:00:00.0: AER: broadcast resume message
>   pcieport 0015:00:00.0: pciehp: Slot(21): Link Down/Up ignored
>   pcieport 0015:00:00.0: AER: device recovery successful
>   pcieport 0015:00:00.0: EDR: DPC port successfully recovered
>   pcieport 0015:00:00.0: EDR: Status for 0015:00:00.0: 0x80
> 
> AER status registers are sticky and Write-1-to-clear. If the link recovered
> after hot reset, we can still safely access AER status and TLP header of the
> error device. In such a case, report the fatal errors, which helps to figure
> out the error root cause.
> 
> After this patch, the logs look like:
> 
>   pcieport 0015:00:00.0: EDR: EDR event received
>   pcieport 0015:00:00.0: EDR: Reported EDR dev: 0015:00:00.0
>   pcieport 0015:00:00.0: DPC: containment event, status:0x200d, ERR_FATAL received from 0015:01:00.0
>   pcieport 0015:00:00.0: AER: broadcast error_detected message
> + vfio-pci 0015:01:00.0: AER: Errors reported prior to reset
> + vfio-pci 0015:01:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Transaction Layer, (Receiver ID)
> + vfio-pci 0015:01:00.0:   device [144d:a80a] error status/mask=00001000/00400000
> + vfio-pci 0015:01:00.0:    [12] TLP                    (First)
> + vfio-pci 0015:01:00.0: AER:   TLP Header: 0x4a004010 0x00000040 0x01000000 0xffffffff
>   pcieport 0015:00:00.0: AER: broadcast mmio_enabled message
>   pcieport 0015:00:00.0: AER: broadcast resume message
>   pcieport 0015:00:00.0: pciehp: Slot(21): Link Down/Up ignored
>   pcieport 0015:00:00.0: AER: device recovery successful
>   pcieport 0015:00:00.0: EDR: DPC port successfully recovered
>   pcieport 0015:00:00.0: EDR: Status for 0015:00:00.0: 0x80
> 
> Signed-off-by: Shuai Xue <[email protected]>
> ---

LGTM. A few suggestions inline.

Reviewed-by: Kuppuswamy Sathyanarayanan <[email protected]>


>  drivers/pci/pci.h      |  4 +++-
>  drivers/pci/pcie/aer.c | 32 ++++++++++++++++++++++++++++----
>  drivers/pci/pcie/dpc.c |  2 +-
>  drivers/pci/pcie/err.c |  5 +++++
>  4 files changed, 37 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
> index 58640e656897..bd020ba0cef0 100644
> --- a/drivers/pci/pci.h
> +++ b/drivers/pci/pci.h
> @@ -746,8 +746,10 @@ struct aer_err_info {
>       struct pcie_tlp_log tlp;        /* TLP Header */
>  };
>  
> -int aer_get_device_error_info(struct aer_err_info *info, int i);
> +int aer_get_device_error_info(struct aer_err_info *info, int i,
> +                           bool link_healthy);
>  void aer_print_error(struct aer_err_info *info, int i);
> +void aer_report_frozen_error(struct pci_dev *dev);
>  
>  int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
>                     unsigned int tlp_len, bool flit,
> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
> index e0bcaa896803..4c0a2bbe9197 100644
> --- a/drivers/pci/pcie/aer.c
> +++ b/drivers/pci/pcie/aer.c
> @@ -1384,12 +1384,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>   * aer_get_device_error_info - read error status from dev and store it to info
>   * @info: pointer to structure to store the error record
>   * @i: index into info->dev[]
> + * @link_healthy: link is healthy or not
>   *
>   * Return: 1 on success, 0 on error.
>   *
>   * Note that @info is reused among all error devices. Clear fields properly.
>   */
> -int aer_get_device_error_info(struct aer_err_info *info, int i)
> +int aer_get_device_error_info(struct aer_err_info *info, int i,
> +                           bool link_healthy)
>  {
>       struct pci_dev *dev;
>       int type, aer;
> @@ -1420,7 +1422,8 @@ int aer_get_device_error_info(struct aer_err_info *info, int i)
>       } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>                  type == PCI_EXP_TYPE_RC_EC ||
>                  type == PCI_EXP_TYPE_DOWNSTREAM ||
> -                info->severity == AER_NONFATAL) {
> +                info->severity == AER_NONFATAL ||
> +                (info->severity == AER_FATAL && link_healthy)) {
>  
>               /* Link is still healthy for IO reads */
>               pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
> @@ -1447,17 +1450,38 @@ int aer_get_device_error_info(struct aer_err_info *info, int i)
>       return 1;
>  }
>  
> +void aer_report_frozen_error(struct pci_dev *dev)

Since this function focuses specifically on printing fatal error details, would
aer_print_fatal_error() be a more descriptive name?

> +{
> +     struct aer_err_info info;
> +     int type = pci_pcie_type(dev);
> +
> +     if (type != PCI_EXP_TYPE_ENDPOINT && type != PCI_EXP_TYPE_RC_END)
> +             return;
> +
> +     info.error_dev_num = 0;
> +     info.severity = AER_FATAL;
> +     info.level = KERN_ERR;
> +     add_error_device(&info, dev);
> +
> +     if (aer_get_device_error_info(&info, 0, true)) {
> +             pci_err(dev, "Errors reported prior to reset\n");

The 'prior to reset' context depends on where this is called. I'd suggest moving
this log to the caller or removing it entirely to keep this helper generic.
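Something like the below (untested sketch), so the "prior to reset" wording
lives in pcie_do_recovery(), where the reset context is actually known:

	/* caller side, in pcie_do_recovery(), after the subordinate reset succeeds */
	if (state == pci_channel_io_frozen) {
		pci_err(dev, "Errors reported prior to reset\n");
		aer_report_frozen_error(dev);
	}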

> +             aer_print_error(&info, 0);
> +     }
> +
> +     pci_dev_put(dev); /* pairs with pci_dev_get() in add_error_device() */
> +}
> +
>  static inline void aer_process_err_devices(struct aer_err_info *e_info)
>  {
>       int i;
>  
>       /* Report all before handling them, to not lose records by reset etc. */
>       for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
> -             if (aer_get_device_error_info(e_info, i))
> +             if (aer_get_device_error_info(e_info, i, false))
>                       aer_print_error(e_info, i);
>       }
>       for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
> -             if (aer_get_device_error_info(e_info, i))
> +             if (aer_get_device_error_info(e_info, i, false))
>                       handle_error_source(e_info->dev[i], e_info);
>       }
>  }
> diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
> index f6069f621683..21c4e8371279 100644
> --- a/drivers/pci/pcie/dpc.c
> +++ b/drivers/pci/pcie/dpc.c
> @@ -284,7 +284,7 @@ struct pci_dev *dpc_process_error(struct pci_dev *pdev)
>               pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n",
>                        status);
>               if (dpc_get_aer_uncorrect_severity(pdev, &info) &&
> -                 aer_get_device_error_info(&info, 0)) {
> +                 aer_get_device_error_info(&info, 0, false)) {
>                       aer_print_error(&info, 0);
>                       pci_aer_clear_nonfatal_status(pdev);
>                       pci_aer_clear_fatal_status(pdev);
> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
> index bebe4bc111d7..0780ea09478b 100644
> --- a/drivers/pci/pcie/err.c
> +++ b/drivers/pci/pcie/err.c
> @@ -253,6 +253,11 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>                       pci_warn(bridge, "subordinate device reset failed\n");
>                       goto failed;
>               }
> +
> +             /* Link recovered, report fatal errors of RCiEP or EP */
> +             if (state == pci_channel_io_frozen)

To align with your comment regarding RCiEPs and EPs, should we explicitly
validate the device type here before calling the report function?
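Maybe something like this (untested), mirroring the check currently inside
aer_report_frozen_error(), which could then be dropped from the helper:

	/* Link recovered, report fatal errors of RCiEP or EP */
	if (state == pci_channel_io_frozen &&
	    (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
	     pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
		aer_report_frozen_error(dev);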

> +                     aer_report_frozen_error(dev);
> +
>       }
>  
>       if (status == PCI_ERS_RESULT_NEED_RESET) {

-- 
Sathyanarayanan Kuppuswamy
Linux Kernel Developer

