@@ -14,7 +14,8 @@ mtk_tmi-y = \
mtk_fsm.o \
mtk_dpmaif.o \
mtk_wwan.o \
- mtk_ethtool.o
+ mtk_ethtool.o \
+ mtk_except.o
ccflags-y += -I$(srctree)/$(src)/
ccflags-y += -I$(srctree)/$(src)/pcie/
@@ -180,14 +180,22 @@ static int mtk_cldma_submit_tx(void *dev, struct sk_buff *skb)
struct tx_req *req;
struct virtq *vq;
struct txq *txq;
+ int ret = 0;
int err;
vq = cd->trans->vq_tbl + trb->vqno;
hw = cd->cldma_hw[vq->hif_id & HIF_ID_BITMASK];
txq = hw->txq[vq->txqno];
- if (!txq->req_budget)
- return -EAGAIN;
+ if (!txq->req_budget) {
+ if (mtk_hw_mmio_check(hw->mdev)) {
+ mtk_except_report_evt(hw->mdev, EXCEPT_LINK_ERR);
+ ret = -EFAULT;
+ } else {
+ ret = -EAGAIN;
+ }
+ goto err;
+ }
data_dma_addr = dma_map_single(hw->mdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
err = dma_mapping_error(hw->mdev->dev, data_dma_addr);
@@ -215,7 +223,8 @@ static int mtk_cldma_submit_tx(void *dev, struct sk_buff *skb)
wmb(); /* ensure GPD setup done before HW start */
- return 0;
+err:
+ return ret;
}
/**
@@ -24,6 +24,13 @@ int mtk_dev_init(struct mtk_md_dev *mdev)
if (ret)
goto err_data_init;
+ ret = mtk_except_init(mdev);
+ if (ret)
+ goto err_except_init;
+
+ return 0;
+err_except_init:
+ mtk_data_exit(mdev);
err_data_init:
mtk_ctrl_exit(mdev);
err_ctrl_init:
@@ -38,6 +45,7 @@ void mtk_dev_exit(struct mtk_md_dev *mdev)
EVT_MODE_BLOCKING | EVT_MODE_TOHEAD);
mtk_data_exit(mdev);
mtk_ctrl_exit(mdev);
+ mtk_except_exit(mdev);
mtk_fsm_exit(mdev);
}
@@ -39,6 +39,7 @@ enum mtk_reset_type {
RESET_FLDR,
RESET_PLDR,
RESET_RGU,
+ RESET_NONE
};
enum mtk_reinit_type {
@@ -51,6 +52,15 @@ enum mtk_l1ss_grp {
L1SS_EXT_EVT,
};
+enum mtk_except_evt {
+ EXCEPT_LINK_ERR,
+ EXCEPT_RGU,
+ EXCEPT_AER_DETECTED,
+ EXCEPT_AER_RESET,
+ EXCEPT_AER_RESUME,
+ EXCEPT_MAX
+};
+
#define L1SS_BIT_L1(grp) BIT(((grp) << 2) + 1)
#define L1SS_BIT_L1_1(grp) BIT(((grp) << 2) + 2)
#define L1SS_BIT_L1_2(grp) BIT(((grp) << 2) + 3)
@@ -87,6 +97,7 @@ struct mtk_md_dev;
* @reset: Callback to reset device.
* @reinit: Callback to execute device re-initialization.
* @mmio_check: Callback to check whether it is available to mmio access device.
+ * @link_check: Callback to execute hardware link check.
* @get_hp_status: Callback to get link hotplug status.
*/
struct mtk_hw_ops {
@@ -118,10 +129,18 @@ struct mtk_hw_ops {
int (*reset)(struct mtk_md_dev *mdev, enum mtk_reset_type type);
int (*reinit)(struct mtk_md_dev *mdev, enum mtk_reinit_type type);
+ bool (*link_check)(struct mtk_md_dev *mdev);
bool (*mmio_check)(struct mtk_md_dev *mdev);
int (*get_hp_status)(struct mtk_md_dev *mdev);
};
+struct mtk_md_except {
+ atomic_t flag;
+ enum mtk_reset_type type;
+ int pci_ext_irq_id;
+ struct timer_list timer;
+};
+
/**
* struct mtk_md_dev - defines the context structure of MTK modem device.
* @dev: pointer to the generic device object.
@@ -134,6 +153,7 @@ struct mtk_hw_ops {
* @ctrl_blk: pointer to the context of control plane submodule.
* @data_blk: pointer to the context of data plane submodule.
* @bm_ctrl: pointer to the context of buffer management submodule.
+ * @except: pointer to the context of driver exception submodule.
*/
struct mtk_md_dev {
struct device *dev;
@@ -147,6 +167,7 @@ struct mtk_md_dev {
void *ctrl_blk;
void *data_blk;
struct mtk_bm_ctrl *bm_ctrl;
+ struct mtk_md_except except;
};
int mtk_dev_init(struct mtk_md_dev *mdev);
@@ -461,6 +482,19 @@ static inline int mtk_hw_reinit(struct mtk_md_dev *mdev, enum mtk_reinit_type ty
return mdev->hw_ops->reinit(mdev, type);
}
+/**
+ * mtk_hw_link_check() - Check if the link is down.
+ * @mdev: Device instance.
+ *
+ * Return:
+ * * false - link is normal.
+ * * true - link is down.
+ */
+static inline bool mtk_hw_link_check(struct mtk_md_dev *mdev)
+{
+ return mdev->hw_ops->link_check(mdev);
+}
+
/**
* mtk_hw_mmio_check() - Check if the PCIe MMIO is ready.
* @mdev: Device instance.
@@ -487,4 +521,49 @@ static inline int mtk_hw_get_hp_status(struct mtk_md_dev *mdev)
return mdev->hw_ops->get_hp_status(mdev);
}
+/**
+ * mtk_except_report_evt() - Report exception event.
+ * @mdev: pointer to mtk_md_dev
+ * @evt: exception event
+ *
+ * Return:
+ * * 0 - OK
+ * * -EFAULT - exception feature is not ready
+ */
+int mtk_except_report_evt(struct mtk_md_dev *mdev, enum mtk_except_evt evt);
+
+/**
+ * mtk_except_start() - Start exception service.
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return: void
+ */
+void mtk_except_start(struct mtk_md_dev *mdev);
+
+/**
+ * mtk_except_stop() - Stop exception service.
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return: void
+ */
+void mtk_except_stop(struct mtk_md_dev *mdev);
+
+/**
+ * mtk_except_init() - Initialize exception feature.
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return:
+ * * 0 - OK
+ */
+int mtk_except_init(struct mtk_md_dev *mdev);
+
+/**
+ * mtk_except_exit() - De-initialize exception feature.
+ * @mdev: pointer to mtk_md_dev
+ *
+ * Return:
+ * * 0 - OK
+ */
+int mtk_except_exit(struct mtk_md_dev *mdev);
+
#endif /* __MTK_DEV_H__ */
@@ -534,10 +534,12 @@ static void mtk_dpmaif_common_err_handle(struct mtk_dpmaif_ctlb *dcb, bool is_hw
return;
}
- if (mtk_hw_mmio_check(DCB_TO_MDEV(dcb)))
+ if (mtk_hw_mmio_check(DCB_TO_MDEV(dcb))) {
dev_err(DCB_TO_DEV(dcb), "Failed to access mmio\n");
- else
+ mtk_except_report_evt(DCB_TO_MDEV(dcb), EXCEPT_LINK_ERR);
+ } else {
mtk_dpmaif_trigger_dev_exception(dcb);
+ }
}
static unsigned int mtk_dpmaif_pit_bid(struct dpmaif_pd_pit *pit_info)
@@ -708,10 +710,10 @@ static int mtk_dpmaif_reload_rx_page(struct mtk_dpmaif_ctlb *dcb,
page_info->offset = data - page_address(page_info->page);
page_info->data_len = bat_ring->buf_size;
page_info->data_dma_addr = dma_map_page(DCB_TO_MDEV(dcb)->dev,
- page_info->page,
- page_info->offset,
- page_info->data_len,
- DMA_FROM_DEVICE);
+ page_info->page,
+ page_info->offset,
+ page_info->data_len,
+ DMA_FROM_DEVICE);
ret = dma_mapping_error(DCB_TO_MDEV(dcb)->dev, page_info->data_dma_addr);
if (unlikely(ret)) {
dev_err(DCB_TO_MDEV(dcb)->dev, "Failed to map dma!\n");
@@ -84,12 +84,12 @@ enum mtk_drv_err {
enum {
DPMAIF_CLEAR_INTR,
- DPMAIF_UNMASK_INTR
+ DPMAIF_UNMASK_INTR,
};
enum dpmaif_drv_dlq_id {
DPMAIF_DLQ0 = 0,
- DPMAIF_DLQ1
+ DPMAIF_DLQ1,
};
struct dpmaif_drv_dlq {
@@ -132,7 +132,7 @@ enum dpmaif_drv_ring_type {
DPMAIF_PIT,
DPMAIF_BAT,
DPMAIF_FRAG,
- DPMAIF_DRB
+ DPMAIF_DRB,
};
enum dpmaif_drv_ring_idx {
@@ -143,7 +143,7 @@ enum dpmaif_drv_ring_idx {
DPMAIF_FRAG_WIDX,
DPMAIF_FRAG_RIDX,
DPMAIF_DRB_WIDX,
- DPMAIF_DRB_RIDX
+ DPMAIF_DRB_RIDX,
};
struct dpmaif_drv_irq_en_mask {
@@ -184,7 +184,7 @@ enum dpmaif_drv_intr_type {
DPMAIF_INTR_DL_FRGCNT_LEN_ERR,
DPMAIF_INTR_DL_PITCNT_LEN_ERR,
DPMAIF_INTR_DL_DONE,
- DPMAIF_INTR_MAX,
+ DPMAIF_INTR_MAX
};
#define DPMAIF_INTR_COUNT ((DPMAIF_INTR_MAX) - (DPMAIF_INTR_MIN) - 1)
new file mode 100644
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022, MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+
+#include "mtk_dev.h"
+#include "mtk_fsm.h"
+
+#define MTK_EXCEPT_HOST_RESET_TIME (2)
+#define MTK_EXCEPT_SELF_RESET_TIME (35)
+#define MTK_EXCEPT_RESET_TYPE_PLDR BIT(26)
+#define MTK_EXCEPT_RESET_TYPE_FLDR BIT(27)
+
+static void mtk_except_start_monitor(struct mtk_md_dev *mdev, unsigned long expires)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ if (!timer_pending(&except->timer) && !mtk_hw_get_hp_status(mdev)) {
+ except->timer.expires = jiffies + expires;
+ add_timer(&except->timer);
+ dev_info(mdev->dev, "Add timer to monitor PCI link\n");
+ }
+}
+
+int mtk_except_report_evt(struct mtk_md_dev *mdev, enum mtk_except_evt evt)
+{
+ struct mtk_md_except *except = &mdev->except;
+ int err, val;
+
+ if (atomic_read(&except->flag) != 1)
+ return -EFAULT;
+
+ switch (evt) {
+ case EXCEPT_LINK_ERR:
+ err = mtk_hw_mmio_check(mdev);
+ if (err)
+ mtk_fsm_evt_submit(mdev, FSM_EVT_LINKDOWN, FSM_F_DFLT, NULL, 0, 0);
+ break;
+ case EXCEPT_RGU:
+ /* delay 20ms to make sure device ready for reset */
+ msleep(20);
+
+ val = mtk_hw_get_dev_state(mdev);
+ dev_info(mdev->dev, "dev_state:0x%x, hw_ver:0x%x, fsm state:%d\n",
+ val, mdev->hw_ver, mdev->fsm->state);
+
+ /* Invalid dev state will trigger PLDR */
+ if (val & MTK_EXCEPT_RESET_TYPE_PLDR) {
+ except->type = RESET_PLDR;
+ } else if (val & MTK_EXCEPT_RESET_TYPE_FLDR) {
+ except->type = RESET_FLDR;
+ } else if (mdev->fsm->state >= FSM_STATE_READY) {
+ dev_info(mdev->dev, "HW reboot\n");
+ except->type = RESET_NONE;
+ } else {
+ dev_info(mdev->dev, "RGU ignored\n");
+ break;
+ }
+ mtk_fsm_evt_submit(mdev, FSM_EVT_DEV_RESET_REQ, FSM_F_DFLT, NULL, 0, 0);
+ break;
+ case EXCEPT_AER_DETECTED:
+ mtk_fsm_evt_submit(mdev, FSM_EVT_AER, FSM_F_DFLT, NULL, 0, EVT_MODE_BLOCKING);
+ break;
+ case EXCEPT_AER_RESET:
+ err = mtk_hw_reset(mdev, RESET_FLDR);
+ if (err)
+ mtk_hw_reset(mdev, RESET_RGU);
+ break;
+ case EXCEPT_AER_RESUME:
+ mtk_except_start_monitor(mdev, HZ);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void mtk_except_start(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ mtk_hw_unmask_irq(mdev, except->pci_ext_irq_id);
+}
+
+void mtk_except_stop(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ mtk_hw_mask_irq(mdev, except->pci_ext_irq_id);
+}
+
+static void mtk_except_fsm_handler(struct mtk_fsm_param *param, void *data)
+{
+ struct mtk_md_except *except = data;
+ enum mtk_reset_type reset_type;
+ struct mtk_md_dev *mdev;
+ unsigned long expires;
+ int err;
+
+ mdev = container_of(except, struct mtk_md_dev, except);
+
+ switch (param->to) {
+ case FSM_STATE_POSTDUMP:
+ mtk_hw_mask_irq(mdev, except->pci_ext_irq_id);
+ mtk_hw_clear_irq(mdev, except->pci_ext_irq_id);
+ mtk_hw_unmask_irq(mdev, except->pci_ext_irq_id);
+ break;
+ case FSM_STATE_OFF:
+ if (param->evt_id == FSM_EVT_DEV_RESET_REQ)
+ reset_type = except->type;
+ else if (param->evt_id == FSM_EVT_LINKDOWN)
+ reset_type = RESET_FLDR;
+ else
+ break;
+
+ if (reset_type == RESET_NONE) {
+ expires = MTK_EXCEPT_SELF_RESET_TIME * HZ;
+ } else {
+ err = mtk_hw_reset(mdev, reset_type);
+ if (err)
+ expires = MTK_EXCEPT_SELF_RESET_TIME * HZ;
+ else
+ expires = MTK_EXCEPT_HOST_RESET_TIME * HZ;
+ }
+
+ mtk_except_start_monitor(mdev, expires);
+ break;
+ default:
+ break;
+ }
+}
+
+static void mtk_except_link_monitor(struct timer_list *timer)
+{
+ struct mtk_md_except *except = container_of(timer, struct mtk_md_except, timer);
+ struct mtk_md_dev *mdev = container_of(except, struct mtk_md_dev, except);
+ int err;
+
+ err = mtk_hw_link_check(mdev);
+ if (!err) {
+ mtk_fsm_evt_submit(mdev, FSM_EVT_REINIT, FSM_F_FULL_REINIT, NULL, 0, 0);
+ del_timer(&except->timer);
+ } else {
+ mod_timer(timer, jiffies + HZ);
+ }
+}
+
+int mtk_except_init(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ except->pci_ext_irq_id = mtk_hw_get_irq_id(mdev, MTK_IRQ_SRC_SAP_RGU);
+
+ mtk_fsm_notifier_register(mdev, MTK_USER_EXCEPT,
+ mtk_except_fsm_handler, except, FSM_PRIO_1, false);
+ timer_setup(&except->timer, mtk_except_link_monitor, 0);
+ atomic_set(&except->flag, 1);
+
+ return 0;
+}
+
+int mtk_except_exit(struct mtk_md_dev *mdev)
+{
+ struct mtk_md_except *except = &mdev->except;
+
+ atomic_set(&except->flag, 0);
+ del_timer(&except->timer);
+ mtk_fsm_notifier_unregister(mdev, MTK_USER_EXCEPT);
+
+ return 0;
+}
@@ -516,6 +516,8 @@ static int mtk_fsm_early_bootup_handler(u32 status, void *__fsm)
dev_stage = dev_state & REGION_BITMASK;
if (dev_stage >= DEV_STAGE_MAX) {
dev_err(mdev->dev, "Invalid dev state 0x%x\n", dev_state);
+ if (mtk_hw_link_check(mdev))
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
return -ENXIO;
}
@@ -364,8 +364,10 @@ static void mtk_cldma_tx_done_work(struct work_struct *work)
state = mtk_cldma_check_intr_status(mdev, txq->hw->base_addr,
DIR_TX, txq->txqno, QUEUE_XFER_DONE);
if (state) {
- if (unlikely(state == LINK_ERROR_VAL))
+ if (unlikely(state == LINK_ERROR_VAL)) {
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
return;
+ }
mtk_cldma_clr_intr_status(mdev, txq->hw->base_addr, DIR_TX,
txq->txqno, QUEUE_XFER_DONE);
@@ -451,6 +453,11 @@ static void mtk_cldma_rx_done_work(struct work_struct *work)
if (!state)
break;
+ if (unlikely(state == LINK_ERROR_VAL)) {
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
+ return;
+ }
+
mtk_cldma_clr_intr_status(mdev, rxq->hw->base_addr, DIR_RX,
rxq->rxqno, QUEUE_XFER_DONE);
@@ -751,6 +758,9 @@ int mtk_cldma_txq_free_t800(struct cldma_hw *hw, int vqno)
devm_kfree(hw->mdev->dev, txq);
hw->txq[txqno] = NULL;
+ if (active == LINK_ERROR_VAL)
+ mtk_except_report_evt(hw->mdev, EXCEPT_LINK_ERR);
+
return 0;
}
@@ -906,6 +916,9 @@ int mtk_cldma_rxq_free_t800(struct cldma_hw *hw, int vqno)
devm_kfree(mdev->dev, rxq);
hw->rxq[rxqno] = NULL;
+ if (active == LINK_ERROR_VAL)
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
+
return 0;
}
@@ -518,6 +518,8 @@ static int mtk_pci_reset(struct mtk_md_dev *mdev, enum mtk_reset_type type)
return mtk_pci_fldr(mdev);
case RESET_PLDR:
return mtk_pci_pldr(mdev);
+ default:
+ break;
}
return -EINVAL;
@@ -529,6 +531,12 @@ static int mtk_pci_reinit(struct mtk_md_dev *mdev, enum mtk_reinit_type type)
struct mtk_pci_priv *priv = mdev->hw_priv;
int ret, ltr, l1ss;
+ if (type == REINIT_TYPE_EXP) {
+ /* We have saved it in probe() */
+ pci_load_saved_state(pdev, priv->saved_state);
+ pci_restore_state(pdev);
+ }
+
/* restore ltr */
ltr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (ltr) {
@@ -553,6 +561,9 @@ static int mtk_pci_reinit(struct mtk_md_dev *mdev, enum mtk_reinit_type type)
mtk_pci_set_msix_merged(priv, priv->irq_cnt);
}
+ if (type == REINIT_TYPE_EXP)
+ mtk_pci_clear_irq(mdev, priv->rgu_irq_id);
+
mtk_pci_unmask_irq(mdev, priv->rgu_irq_id);
mtk_pci_unmask_irq(mdev, priv->mhccif_irq_id);
@@ -616,6 +627,7 @@ static const struct mtk_hw_ops mtk_pci_ops = {
.get_ext_evt_status = mtk_mhccif_get_evt_status,
.reset = mtk_pci_reset,
.reinit = mtk_pci_reinit,
+ .link_check = mtk_pci_link_check,
.mmio_check = mtk_pci_mmio_check,
.get_hp_status = mtk_pci_get_hp_status,
};
@@ -636,6 +648,7 @@ static void mtk_mhccif_isr_work(struct work_struct *work)
if (unlikely(stat == U32_MAX && mtk_pci_link_check(mdev))) {
/* When link failed, we don't need to unmask/clear. */
dev_err(mdev->dev, "Failed to check link in MHCCIF handler.\n");
+ mtk_except_report_evt(mdev, EXCEPT_LINK_ERR);
return;
}
@@ -760,6 +773,7 @@ static void mtk_rgu_work(struct work_struct *work)
struct mtk_pci_priv *priv;
struct mtk_md_dev *mdev;
struct pci_dev *pdev;
+ int ret;
priv = container_of(to_delayed_work(work), struct mtk_pci_priv, rgu_work);
mdev = priv->mdev;
@@ -770,6 +784,10 @@ static void mtk_rgu_work(struct work_struct *work)
mtk_pci_mask_irq(mdev, priv->rgu_irq_id);
mtk_pci_clear_irq(mdev, priv->rgu_irq_id);
+ ret = mtk_except_report_evt(mdev, EXCEPT_RGU);
+ if (ret)
+ dev_err(mdev->dev, "Failed to report exception with EXCEPT_RGU\n");
+
if (!pdev->msix_enabled)
return;
@@ -782,8 +800,14 @@ static int mtk_rgu_irq_cb(int irq_id, void *data)
struct mtk_pci_priv *priv;
priv = mdev->hw_priv;
+
+ if (delayed_work_pending(&priv->rgu_work))
+ goto exit;
+
schedule_delayed_work(&priv->rgu_work, msecs_to_jiffies(1));
+ dev_info(mdev->dev, "RGU IRQ arrived\n");
+exit:
return 0;
}
@@ -1105,16 +1129,39 @@ static void mtk_pci_remove(struct pci_dev *pdev)
static pci_ers_result_t mtk_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
+ struct mtk_md_dev *mdev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = mtk_except_report_evt(mdev, EXCEPT_AER_DETECTED);
+ if (ret)
+ dev_err(mdev->dev, "Failed to call exception report API with EXCEPT_AER_DETECTED!\n");
+ dev_info(mdev->dev, "AER detected: pci_channel_state_t=%d\n", state);
+
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mtk_pci_slot_reset(struct pci_dev *pdev)
{
+ struct mtk_md_dev *mdev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = mtk_except_report_evt(mdev, EXCEPT_AER_RESET);
+ if (ret)
+ dev_err(mdev->dev, "Failed to call exception report API with EXCEPT_AER_RESET!\n");
+ dev_info(mdev->dev, "Slot reset!\n");
+
return PCI_ERS_RESULT_RECOVERED;
}
static void mtk_pci_io_resume(struct pci_dev *pdev)
{
+ struct mtk_md_dev *mdev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = mtk_except_report_evt(mdev, EXCEPT_AER_RESUME);
+ if (ret)
+ dev_err(mdev->dev, "Failed to call exception report API with EXCEPT_AER_RESUME!\n");
+ dev_info(mdev->dev, "IO resume!\n");
}
static const struct pci_error_handlers mtk_pci_err_handler = {