@@ -15,7 +15,8 @@ mtk_tmi-y = \
mtk_fsm.o \
mtk_dpmaif.o \
mtk_wwan.o \
- mtk_ethtool.o
+ mtk_ethtool.o \
+ mtk_except.o
ccflags-y += -I$(srctree)/$(src)/
ccflags-y += -I$(srctree)/$(src)/pcie/
@@ -180,19 +180,29 @@ static int mtk_cldma_submit_tx(void *dev, struct sk_buff *skb)
struct tx_req *req;
struct virtq *vq;
struct txq *txq;
+ int ret = 0;
int err;
vq = cd->trans->vq_tbl + trb->vqno;
hw = cd->cldma_hw[vq->hif_id & HIF_ID_BITMASK];
txq = hw->txq[vq->txqno];
- if (!txq->req_budget)
- return -EAGAIN;
+ if (!txq->req_budget) {
+ if (mtk_hw_mmio_check(hw->mdev)) {
+ mtk_except_report_evt(hw->mdev, EXCEPT_LINK_ERR);
+ ret = -EFAULT;
+ } else {
+ ret = -EAGAIN;
+ }
+ goto err;
+ }
err = mtk_dma_map_single(hw->mdev, &data_dma_addr, skb->data,
skb->len, DMA_TO_DEVICE);
- if (err)
- return -EFAULT;
+ if (err) {
+ ret = -EFAULT;
+ goto err;
+ }
mutex_lock(&txq->lock);
txq->req_budget--;
@@ -213,7 +223,8 @@ static int mtk_cldma_submit_tx(void *dev, struct sk_buff *skb)
wmb(); /* ensure GPD setup done before HW start */
- return 0;
+err:
+ return ret;
}
/* cldma_trb_process() - Dispatch trb request to low-level CLDMA routine
@@ -29,6 +29,13 @@ int mtk_dev_init(struct mtk_md_dev *mdev)
if (ret)
goto err_data_init;
+ ret = mtk_except_init(mdev);
+ if (ret)
+ goto err_except_init;
+
+ return 0;
+err_except_init:
+ mtk_data_exit(mdev);
err_data_init:
mtk_ctrl_exit(mdev);
err_ctrl_init:
@@ -46,6 +53,7 @@ void mtk_dev_exit(struct mtk_md_dev *mdev)
mtk_data_exit(mdev);
mtk_ctrl_exit(mdev);
mtk_bm_exit(mdev);
+ mtk_except_exit(mdev);
mtk_fsm_exit(mdev);
}
@@ -39,6 +39,7 @@ enum mtk_reset_type {
RESET_FLDR,
RESET_PLDR,
RESET_RGU,
+ RESET_NONE
};
enum mtk_reinit_type {
@@ -51,6 +52,15 @@ enum mtk_l1ss_grp {
L1SS_EXT_EVT,
};
+enum mtk_except_evt {
+ EXCEPT_LINK_ERR,
+ EXCEPT_RGU,
+ EXCEPT_AER_DETECTED,
+ EXCEPT_AER_RESET,
+ EXCEPT_AER_RESUME,
+ EXCEPT_MAX
+};
+
#define L1SS_BIT_L1(grp) BIT(((grp) << 2) + 1)
#define L1SS_BIT_L1_1(grp) BIT(((grp) << 2) + 2)
#define L1SS_BIT_L1_2(grp) BIT(((grp) << 2) + 3)
@@ -83,6 +93,7 @@ struct mtk_md_dev;
* @get_ext_evt_status:Callback to get HW Layer external event status.
* @reset: Callback to reset device.
* @reinit: Callback to execute device re-initialization.
+ * @link_check: Callback to execute hardware link check.
* @get_hp_status: Callback to get link hotplug status.
*/
struct mtk_hw_ops {
@@ -119,10 +130,18 @@ struct mtk_hw_ops {
int (*reset)(struct mtk_md_dev *mdev, enum mtk_reset_type type);
int (*reinit)(struct mtk_md_dev *mdev, enum mtk_reinit_type type);
+ bool (*link_check)(struct mtk_md_dev *mdev);
bool (*mmio_check)(struct mtk_md_dev *mdev);
int (*get_hp_status)(struct mtk_md_dev *mdev);
};
+struct mtk_md_except {
+ atomic_t flag;
+ enum mtk_reset_type type;
+ int pci_ext_irq_id;
+ struct timer_list timer;
+};
+
/* mtk_md_dev defines the structure of MTK modem device */
struct mtk_md_dev {
struct device *dev;
@@ -136,6 +155,7 @@ struct mtk_md_dev {
void *ctrl_blk;
void *data_blk;
struct mtk_bm_ctrl *bm_ctrl;
+ struct mtk_md_except except;
};
int mtk_dev_init(struct mtk_md_dev *mdev);
@@ -429,6 +449,17 @@ static inline int mtk_hw_reinit(struct mtk_md_dev *mdev, enum mtk_reinit_type ty
return mdev->hw_ops->reinit(mdev, type);
}
+/* mtk_hw_link_check() - Check if the link is down.
+ *
+ * @mdev: Device instance.
+ *
+ * Return: false indicates the link is normal, true indicates the link is down.
+ */
+static inline bool mtk_hw_link_check(struct mtk_md_dev *mdev)
+{
+	return mdev->hw_ops->link_check(mdev);
+}
+
/* mtk_hw_mmio_check() -Check if the PCIe MMIO is ready.
*
* @mdev: Device instance.
@@ -517,4 +548,51 @@ static inline int mtk_dma_unmap_page(struct mtk_md_dev *mdev,
return 0;
}
+/* mtk_except_report_evt() - Report exception event.
+ *
+ * @mdev: pointer to mtk_md_dev
+ * @evt: exception event
+ *
+ * Return:
+ * 0 - OK
+ * -EFAULT - exception feature is not ready
+ */
+int mtk_except_report_evt(struct mtk_md_dev *mdev, enum mtk_except_evt evt);
+
+/* mtk_except_start() - Start exception service.
+ *
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return:
+ * void
+ */
+void mtk_except_start(struct mtk_md_dev *mdev);
+
+/* mtk_except_stop() - Stop exception service.
+ *
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return:
+ * void
+ */
+void mtk_except_stop(struct mtk_md_dev *mdev);
+
+/* mtk_except_init() - Initialize exception feature.
+ *
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return:
+ * 0 - OK
+ */
+int mtk_except_init(struct mtk_md_dev *mdev);
+
+/* mtk_except_exit() - De-Initialize exception feature.
+ *
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return:
+ * 0 - OK
+ */
+int mtk_except_exit(struct mtk_md_dev *mdev);
+
#endif /* __MTK_DEV_H__ */
@@ -536,10 +536,12 @@ static void mtk_dpmaif_common_err_handle(struct mtk_dpmaif_ctlb *dcb, bool is_hw
return;
}
- if (mtk_hw_mmio_check(DCB_TO_MDEV(dcb)))
+ if (mtk_hw_mmio_check(DCB_TO_MDEV(dcb))) {
dev_err(DCB_TO_DEV(dcb), "Failed to access mmio\n");
- else
+ mtk_except_report_evt(DCB_TO_MDEV(dcb), EXCEPT_LINK_ERR);
+ } else {
mtk_dpmaif_trigger_dev_exception(dcb);
+ }
}
static unsigned int mtk_dpmaif_pit_bid(struct dpmaif_pd_pit *pit_info)
@@ -1354,7 +1356,7 @@ static unsigned int mtk_dpmaif_poll_tx_drb(struct dpmaif_txq *txq)
old_sw_rd_idx = txq->drb_rd_idx;
ret = mtk_dpmaif_drv_get_ring_idx(dcb->drv_info, DPMAIF_DRB_RIDX, txq->id);
if (unlikely(ret < 0)) {
- dev_err(DCB_TO_DEV(dcb), "Failed to read txq%u drb_rd_idx, ret=%d", txq->id, ret);
+ dev_err(DCB_TO_DEV(dcb), "Failed to read txq%u drb_rd_idx, ret=%d\n", txq->id, ret);
mtk_dpmaif_common_err_handle(dcb, true);
return 0;
}
@@ -2274,7 +2276,6 @@ static void mtk_dpmaif_trans_disable(struct mtk_dpmaif_ctlb *dcb)
static void mtk_dpmaif_trans_ctl(struct mtk_dpmaif_ctlb *dcb, bool enable)
{
mutex_lock(&dcb->trans_ctl_lock);
-
if (enable) {
if (!dcb->trans_enabled) {
if (dcb->dpmaif_state == DPMAIF_STATE_PWRON &&
@@ -2641,7 +2642,8 @@ static int mtk_dpmaif_drv_res_init(struct mtk_dpmaif_ctlb *dcb)
if (DPMAIF_GET_HW_VER(dcb) == 0x0800) {
dcb->drv_info->drv_ops = &dpmaif_drv_ops_t800;
} else {
- dev_err(DCB_TO_DEV(dcb), "Unsupported mdev, hw_ver=0x%x", DPMAIF_GET_HW_VER(dcb));
+ devm_kfree(DCB_TO_DEV(dcb), dcb->drv_info);
+ dev_err(DCB_TO_DEV(dcb), "Unsupported mdev, hw_ver=0x%x\n", DPMAIF_GET_HW_VER(dcb));
ret = -EFAULT;
}
@@ -2791,7 +2793,8 @@ static int mtk_dpmaif_irq_init(struct mtk_dpmaif_ctlb *dcb)
irq_param->dpmaif_irq_src = irq_src;
irq_param->dev_irq_id = mtk_hw_get_irq_id(DCB_TO_MDEV(dcb), irq_src);
if (irq_param->dev_irq_id < 0) {
- dev_err(DCB_TO_DEV(dcb), "Failed to allocate irq id, irq_src=%d", irq_src);
+ dev_err(DCB_TO_DEV(dcb), "Failed to allocate irq id, irq_src=%d\n",
+ irq_src);
ret = -EINVAL;
goto err_reg_irq;
}
@@ -3489,6 +3492,7 @@ static int mtk_dpmaif_pit_bid_frag_check(struct dpmaif_rxq *rxq, unsigned int cu
bat_ring = &rxq->dcb->bat_info.frag_bat_ring;
cur_bat_record = bat_ring->sw_record_base + cur_bid;
+
if (unlikely(!cur_bat_record->frag.page || cur_bid >= bat_ring->bat_cnt)) {
dev_err(DCB_TO_DEV(dcb),
"Invalid parameter rxq%u bat%d, bid=%u, bat_cnt=%u\n",
@@ -84,12 +84,12 @@ enum mtk_drv_err {
enum {
DPMAIF_CLEAR_INTR,
- DPMAIF_UNMASK_INTR
+ DPMAIF_UNMASK_INTR,
};
enum dpmaif_drv_dlq_id {
DPMAIF_DLQ0 = 0,
- DPMAIF_DLQ1
+ DPMAIF_DLQ1,
};
struct dpmaif_drv_dlq {
@@ -132,7 +132,7 @@ enum dpmaif_drv_ring_type {
DPMAIF_PIT,
DPMAIF_BAT,
DPMAIF_FRAG,
- DPMAIF_DRB
+ DPMAIF_DRB,
};
enum dpmaif_drv_ring_idx {
@@ -143,7 +143,7 @@ enum dpmaif_drv_ring_idx {
DPMAIF_FRAG_WIDX,
DPMAIF_FRAG_RIDX,
DPMAIF_DRB_WIDX,
- DPMAIF_DRB_RIDX
+ DPMAIF_DRB_RIDX,
};
struct dpmaif_drv_irq_en_mask {
@@ -184,7 +184,7 @@ enum dpmaif_drv_intr_type {
DPMAIF_INTR_DL_FRGCNT_LEN_ERR,
DPMAIF_INTR_DL_PITCNT_LEN_ERR,
DPMAIF_INTR_DL_DONE,
- DPMAIF_INTR_MAX,
+ DPMAIF_INTR_MAX
};
#define DPMAIF_INTR_COUNT ((DPMAIF_INTR_MAX) - (DPMAIF_INTR_MIN) - 1)
new file mode 100644
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022, MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+
+#include "mtk_dev.h"
+#include "mtk_fsm.h"
+
+#define MTK_EXCEPT_HOST_RESET_TIME (2)
+#define MTK_EXCEPT_SELF_RESET_TIME (35)
+#define MTK_EXCEPT_RESET_TYPE_PLDR BIT(26)
+#define MTK_EXCEPT_RESET_TYPE_FLDR BIT(27)
+
+static void mtk_except_start_monitor(struct mtk_md_dev *mdev, unsigned long expires)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ if (!timer_pending(&except->timer) && !mtk_hw_get_hp_status(mdev)) {
+ except->timer.expires = jiffies + expires;
+ add_timer(&except->timer);
+ dev_info(mdev->dev, "Add timer to monitor PCI link\n");
+ }
+}
+
+int mtk_except_report_evt(struct mtk_md_dev *mdev, enum mtk_except_evt evt)
+{
+ struct mtk_md_except *except = &mdev->except;
+ int err, val;
+
+ if (atomic_read(&except->flag) != 1)
+ return -EFAULT;
+
+ switch (evt) {
+ case EXCEPT_LINK_ERR:
+ err = mtk_hw_mmio_check(mdev);
+ if (err)
+ mtk_fsm_evt_submit(mdev, FSM_EVT_LINKDOWN, FSM_F_DFLT, NULL, 0, 0);
+ break;
+ case EXCEPT_RGU:
+ /* delay 20ms to make sure device ready for reset */
+ msleep(20);
+
+ val = mtk_hw_get_dev_state(mdev);
+ dev_info(mdev->dev, "dev_state:0x%x, hw_ver:0x%x, fsm state:%d\n",
+ val, mdev->hw_ver, mdev->fsm->state);
+
+ /* Invalid dev state will trigger PLDR */
+ if (val & MTK_EXCEPT_RESET_TYPE_PLDR) {
+ except->type = RESET_PLDR;
+ } else if (val & MTK_EXCEPT_RESET_TYPE_FLDR) {
+ except->type = RESET_FLDR;
+ } else if (mdev->fsm->state >= FSM_STATE_READY) {
+ dev_info(mdev->dev, "HW reboot\n");
+ except->type = RESET_NONE;
+ } else {
+ dev_info(mdev->dev, "RGU ignored\n");
+ break;
+ }
+ mtk_fsm_evt_submit(mdev, FSM_EVT_DEV_RESET_REQ, FSM_F_DFLT, NULL, 0, 0);
+ break;
+ case EXCEPT_AER_DETECTED:
+ mtk_fsm_evt_submit(mdev, FSM_EVT_AER, FSM_F_DFLT, NULL, 0, EVT_MODE_BLOCKING);
+ break;
+ case EXCEPT_AER_RESET:
+ err = mtk_hw_reset(mdev, RESET_FLDR);
+ if (err)
+ mtk_hw_reset(mdev, RESET_RGU);
+ break;
+ case EXCEPT_AER_RESUME:
+ mtk_except_start_monitor(mdev, HZ);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void mtk_except_start(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ mtk_hw_unmask_irq(mdev, except->pci_ext_irq_id);
+}
+
+void mtk_except_stop(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ mtk_hw_mask_irq(mdev, except->pci_ext_irq_id);
+}
+
+static void mtk_except_fsm_handler(struct mtk_fsm_param *param, void *data)
+{
+ struct mtk_md_except *except = data;
+ enum mtk_reset_type reset_type;
+ struct mtk_md_dev *mdev;
+ unsigned long expires;
+ int err;
+
+ mdev = container_of(except, struct mtk_md_dev, except);
+
+ switch (param->to) {
+ case FSM_STATE_POSTDUMP:
+ mtk_hw_mask_irq(mdev, except->pci_ext_irq_id);
+ mtk_hw_clear_irq(mdev, except->pci_ext_irq_id);
+ mtk_hw_unmask_irq(mdev, except->pci_ext_irq_id);
+ break;
+ case FSM_STATE_OFF:
+ if (param->evt_id == FSM_EVT_DEV_RESET_REQ)
+ reset_type = except->type;
+ else if (param->evt_id == FSM_EVT_LINKDOWN)
+ reset_type = RESET_FLDR;
+ else
+ break;
+
+ if (reset_type == RESET_NONE) {
+ expires = MTK_EXCEPT_SELF_RESET_TIME * HZ;
+ } else {
+ err = mtk_hw_reset(mdev, reset_type);
+ if (err)
+ expires = MTK_EXCEPT_SELF_RESET_TIME * HZ;
+ else
+ expires = MTK_EXCEPT_HOST_RESET_TIME * HZ;
+ }
+
+ mtk_except_start_monitor(mdev, expires);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Timer callback: poll the PCI link after a reset; re-arm every HZ until it is back. */
+static void mtk_except_link_monitor(struct timer_list *timer)
+{
+	struct mtk_md_except *except = container_of(timer, struct mtk_md_except, timer);
+	struct mtk_md_dev *mdev = container_of(except, struct mtk_md_dev, except);
+	bool link_down;
+
+	link_down = mtk_hw_link_check(mdev);
+	if (!link_down) {
+		mtk_fsm_evt_submit(mdev, FSM_EVT_REINIT, FSM_F_FULL_REINIT, NULL, 0, 0);
+		del_timer(&except->timer);
+	} else {
+		mod_timer(timer, jiffies + HZ);
+	}
+}
+
+int mtk_except_init(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ except->pci_ext_irq_id = mtk_hw_get_irq_id(mdev, MTK_IRQ_SRC_SAP_RGU);
+
+ mtk_fsm_notifier_register(mdev, MTK_USER_EXCEPT,
+ mtk_except_fsm_handler, except, FSM_PRIO_1, false);
+ timer_setup(&except->timer, mtk_except_link_monitor, 0);
+ atomic_set(&except->flag, 1);
+
+ return 0;
+}
+
+int mtk_except_exit(struct mtk_md_dev *mdev)
+{
+	struct mtk_md_except *except = &mdev->except;
+
+	atomic_set(&except->flag, 0);
+	del_timer_sync(&except->timer); /* callback re-arms itself; wait for it to finish */
+	mtk_fsm_notifier_unregister(mdev, MTK_USER_EXCEPT);
+
+	return 0;
+}
@@ -516,6 +516,8 @@ static int mtk_fsm_early_bootup_handler(u32 status, void *__fsm)
dev_stage = dev_state & REGION_BITMASK;
if (dev_stage >= DEV_STAGE_MAX) {
dev_err(mdev->dev, "Invalid dev state 0x%x\n", dev_state);
+ if (mtk_hw_link_check(mdev))
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
return -ENXIO;
}
@@ -364,8 +364,10 @@ static void mtk_cldma_tx_done_work(struct work_struct *work)
state = mtk_cldma_check_intr_status(mdev, txq->hw->base_addr,
DIR_TX, txq->txqno, QUEUE_XFER_DONE);
if (state) {
- if (unlikely(state == LINK_ERROR_VAL))
+ if (unlikely(state == LINK_ERROR_VAL)) {
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
return;
+ }
mtk_cldma_clr_intr_status(mdev, txq->hw->base_addr, DIR_TX,
txq->txqno, QUEUE_XFER_DONE);
@@ -452,6 +454,11 @@ static void mtk_cldma_rx_done_work(struct work_struct *work)
if (!state)
break;
+ if (unlikely(state == LINK_ERROR_VAL)) {
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
+ return;
+ }
+
mtk_cldma_clr_intr_status(mdev, rxq->hw->base_addr, DIR_RX,
rxq->rxqno, QUEUE_XFER_DONE);
@@ -750,6 +757,9 @@ int mtk_cldma_txq_free_t800(struct cldma_hw *hw, int vqno)
devm_kfree(hw->mdev->dev, txq);
hw->txq[txqno] = NULL;
+ if (active == LINK_ERROR_VAL)
+ mtk_except_report_evt(hw->mdev, EXCEPT_LINK_ERR);
+
return 0;
}
@@ -915,6 +925,9 @@ int mtk_cldma_rxq_free_t800(struct cldma_hw *hw, int vqno)
devm_kfree(mdev->dev, rxq);
hw->rxq[rxqno] = NULL;
+ if (active == LINK_ERROR_VAL)
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
+
return 0;
}
@@ -536,6 +536,8 @@ static int mtk_pci_reset(struct mtk_md_dev *mdev, enum mtk_reset_type type)
return mtk_pci_fldr(mdev);
case RESET_PLDR:
return mtk_pci_pldr(mdev);
+ default:
+ break;
}
return -EINVAL;
@@ -547,6 +549,12 @@ static int mtk_pci_reinit(struct mtk_md_dev *mdev, enum mtk_reinit_type type)
struct mtk_pci_priv *priv = mdev->hw_priv;
int ret, ltr, l1ss;
+ if (type == REINIT_TYPE_EXP) {
+ /* We have saved it in probe() */
+ pci_load_saved_state(pdev, priv->saved_state);
+ pci_restore_state(pdev);
+ }
+
/* restore ltr */
ltr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (ltr) {
@@ -571,6 +579,9 @@ static int mtk_pci_reinit(struct mtk_md_dev *mdev, enum mtk_reinit_type type)
mtk_pci_set_msix_merged(priv, priv->irq_cnt);
}
+ if (type == REINIT_TYPE_EXP)
+ mtk_pci_clear_irq(mdev, priv->rgu_irq_id);
+
mtk_pci_unmask_irq(mdev, priv->rgu_irq_id);
mtk_pci_unmask_irq(mdev, priv->mhccif_irq_id);
@@ -634,6 +645,7 @@ static const struct mtk_hw_ops mtk_pci_ops = {
.get_ext_evt_status = mtk_mhccif_get_evt_status,
.reset = mtk_pci_reset,
.reinit = mtk_pci_reinit,
+ .link_check = mtk_pci_link_check,
.mmio_check = mtk_pci_mmio_check,
.get_hp_status = mtk_pci_get_hp_status,
};
@@ -654,6 +666,7 @@ static void mtk_mhccif_isr_work(struct work_struct *work)
if (unlikely(stat == U32_MAX && mtk_pci_link_check(mdev))) {
/* When link failed, we don't need to unmask/clear. */
dev_err(mdev->dev, "Failed to check link in MHCCIF handler.\n");
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
return;
}
@@ -778,6 +791,7 @@ static void mtk_rgu_work(struct work_struct *work)
struct mtk_pci_priv *priv;
struct mtk_md_dev *mdev;
struct pci_dev *pdev;
+ int ret;
priv = container_of(to_delayed_work(work), struct mtk_pci_priv, rgu_work);
mdev = priv->mdev;
@@ -788,6 +802,10 @@ static void mtk_rgu_work(struct work_struct *work)
mtk_pci_mask_irq(mdev, priv->rgu_irq_id);
mtk_pci_clear_irq(mdev, priv->rgu_irq_id);
+ ret = mtk_except_report_evt(mdev, EXCEPT_RGU);
+ if (ret)
+ dev_err(mdev->dev, "Failed to report exception with EXCEPT_RGU\n");
+
if (!pdev->msix_enabled)
return;
@@ -800,8 +818,14 @@ static int mtk_rgu_irq_cb(int irq_id, void *data)
struct mtk_pci_priv *priv;
priv = mdev->hw_priv;
+
+ if (delayed_work_pending(&priv->rgu_work))
+ goto exit;
+
schedule_delayed_work(&priv->rgu_work, msecs_to_jiffies(1));
+ dev_info(mdev->dev, "RGU IRQ arrived\n");
+exit:
return 0;
}
@@ -1129,16 +1153,39 @@ static void mtk_pci_remove(struct pci_dev *pdev)
static pci_ers_result_t mtk_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
+	struct mtk_md_dev *mdev = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = mtk_except_report_evt(mdev, EXCEPT_AER_DETECTED);
+	if (ret)
+		dev_err(mdev->dev, "Failed to call exception report API with EXCEPT_AER_DETECTED!\n");
+	dev_info(mdev->dev, "AER detected: pci_channel_state_t=%d\n", state);
+
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mtk_pci_slot_reset(struct pci_dev *pdev)
{
+	struct mtk_md_dev *mdev = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = mtk_except_report_evt(mdev, EXCEPT_AER_RESET);
+	if (ret)
+		dev_err(mdev->dev, "Failed to call exception report API with EXCEPT_AER_RESET!\n");
+	dev_info(mdev->dev, "Slot reset!\n");
+
	return PCI_ERS_RESULT_RECOVERED;
}
static void mtk_pci_io_resume(struct pci_dev *pdev)
{
+	struct mtk_md_dev *mdev = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = mtk_except_report_evt(mdev, EXCEPT_AER_RESUME);
+	if (ret)
+		dev_err(mdev->dev, "Failed to call exception report API with EXCEPT_AER_RESUME!\n");
+	dev_info(mdev->dev, "IO resume!\n");
}
static const struct pci_error_handlers mtk_pci_err_handler = {